| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3.8.8 64-bit ('cam')
# language: python
# name: python388jvsc74a57bd0acafb728b15233fa3654ff8b422c21865df0ca42ea3b74670e1f2f098ebd61ca
# ---
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/python-logo-notext.svg"
# style="display:block;margin:auto;width:10%"/>
# <h1 style="text-align:center;">Python: Pandas Series</h1>
# <h2 style="text-align:center;">Coding Akademie München GmbH</h2>
# <br/>
# <div style="text-align:center;">Dr. <NAME></div>
# <div style="text-align:center;"><NAME></div>
# %% [markdown] slideshow={"slide_type": "slide"}
#
# # The `Series` Type
#
# The pandas type `Series` represents a sequence of values that can be indexed numerically like a Python list, but that can also carry a semantically more meaningful index, e.g. dates for time series data.
#
# Internally, a `Series` object is backed by a NumPy array, so most operations on NumPy arrays can also be applied to pandas `Series` objects (see the short example after the imports below).
# %%
import numpy as np
import pandas as pd
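# %% [markdown]
# A brief illustration of the note above (an added sketch, assuming nothing beyond the `numpy` and `pandas` imports of this notebook): NumPy ufuncs such as `np.exp` operate elementwise on a `Series` and preserve its index.
# %%
s = pd.Series([1.0, 2.0, 3.0], index=['a', 'b', 'c'])
np.exp(s)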
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Creating Series
#
# ### From Lists
# %%
pd.Series(data=[10, 20, 30, 40])
# %%
pd.Series(['a', 'b', 'c'])
# %% [markdown] slideshow={"slide_type": "subslide"}
# ### From Lists with an Index
# %%
pd.Series(data=[1, 2, 3, 4], index=['w', 'x', 'y', 'z'])
# %% [markdown] slideshow={"slide_type": "subslide"}
# ### From NumPy Arrays
# %%
arr = np.arange(5)
indices = 'a b c d e'.split()
# %%
pd.Series(data=arr)
# %%
| pd.Series(arr, index=indices) | pandas.Series |
import csv
import glob
import math
import os
import sys
from random import random, seed
from timeit import default_timer as timer
import time
from statistics import mean
from pathlib import Path
import networkx as nx
import numpy as np
from scapy.layers.inet import IP, UDP
from scapy.utils import PcapWriter, PcapReader
import tkinter as tk
from tkinter import filedialog
import zat
from zat.log_to_dataframe import LogToDataFrame
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.pyplot import cm
import matplotlib.transforms as mtrans
class Original_Dataset_Analysis():
@staticmethod
def calculate_avg_connection_length_per_detailed_label(path_to_avg_connection_length, path_to_storage):
csv_files = glob.glob(path_to_avg_connection_length + "/*.csv")
df_list = []
for csv_file in csv_files:
csv_df = pd.read_csv(csv_file)
df_list.append(csv_df)
summary_df = df_list.pop()
loop_length = len(df_list)
for to_add_df in range(loop_length):
summary_df = summary_df.append(df_list.pop())
summary_df["length"] = summary_df.length.astype(int)
avg_length_connection = summary_df.groupby("detailed_label")["length"].mean()
avg_length_connection.to_csv(path_to_storage)
@staticmethod
def restart_determine_connection_length(path_to_iot_scenarios_folder, folder_to_store):
path_to_iot_scenarios_folder = path_to_iot_scenarios_folder
folder_to_store = folder_to_store
scanned_files = sorted([f.path for f in os.scandir(folder_to_store)])
scanned_files = sorted(list(set([os.path.basename(x).split("_", maxsplit=1)[0] for x in scanned_files])))
folders = sorted([f.path for f in os.scandir(path_to_iot_scenarios_folder) if f.is_dir()])
folders = [(x, os.path.basename(x)) for x in folders]
to_scan_files = []
for path, scenario in folders:
if scenario not in scanned_files:
to_scan_files.append(path)
folders = to_scan_files
for index, folder in enumerate(folders):
scenario_name = str(os.path.basename(folder)).strip()
print("Scenario: " + str(index + 1) + "/" + str(len(folders)))
print("Scenario name: " + scenario_name)
pcap_files = glob.glob(folder + "/*.pcap")
for index_file, pcap_file in enumerate(pcap_files):
file_name = str(os.path.basename(pcap_file)).strip()
path_to_pcap_file = pcap_file
print("File: " + str(index_file + 1) + "/" + str(len(pcap_files)))
print("File name : " + file_name)
summary_csv_path = folder_to_store + "/" + scenario_name + "_" + file_name + "_con_length.csv"
with open(summary_csv_path, 'a', newline='') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
new_row = ["src_ip","dst_ip","length"]
csv_writer.writerow(new_row)
csvfile.close()
appended_packet_counter = 0
connections = {}
write_counter = 1
with PcapReader(path_to_pcap_file) as packets:
for packet_count, packet in enumerate(packets):
if IP in packet:
src_ip = packet[IP].src
dst_ip = packet[IP].dst
if (src_ip, dst_ip) not in connections:
connections[(src_ip, dst_ip)] = 0
appended_packet_counter = appended_packet_counter + 1
old_entry = connections[(src_ip, dst_ip)]
new_entry = old_entry + 1
connections[(src_ip, dst_ip)] = new_entry
if appended_packet_counter == 1500000:
with open(summary_csv_path, 'a', newline='') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for (src_ip, dst_ip), amount in connections.items():
new_line = [src_ip, dst_ip, amount]
csv_writer.writerow(new_line)
csvfile.close()
appended_packet_counter = 0
connections.clear()
appended_packet_counter = 0
print("Write " + str(write_counter) + " Finish")
write_counter = write_counter + 1
packets.close()
if (len(connections) > 0):
print("Write " + str(write_counter))
with open(summary_csv_path, 'a', newline='') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for (src_ip, dst_ip), amount in connections.items():
new_line = [src_ip, dst_ip , amount]
csv_writer.writerow(new_line)
csvfile.close()
connections.clear()
sys.exit()
@staticmethod
def adding_detailed_label_info_to_connection_list(path_to_avg_length_files, path_to_detailed_labels, path_to_storage):
checked_files = sorted([f.path for f in os.scandir(path_to_storage)])
checked_files = list(map(lambda x: os.path.basename(x), checked_files))
avg_length_files = sorted([f.path for f in os.scandir(path_to_avg_length_files)])
avg_length_files = list(map(lambda x: (os.path.basename(x), x), avg_length_files))
to_check_files = []
for file_name, path in avg_length_files:
if file_name not in checked_files:
to_check_files.append(path)
for file_index, file_path in enumerate(to_check_files):
combined_file_name = os.path.basename(file_path)
scenario = combined_file_name.split("_", maxsplit=1)[0]
file = str(combined_file_name.split("_", maxsplit=1)[1].split(".pcap")[0])
print("File " + str(file_index + 1) + "/" + str(len(to_check_files)))
print("Scenario name " + str(scenario))
print("File name " + str(file))
csv_df = pd.read_csv(file_path)
csv_df = csv_df.groupby(["src_ip", "dst_ip"])["length"].sum().to_frame().reset_index()
csv_df["scenario"] = scenario
csv_df["file"] = file
csv_df = csv_df.sort_values(['src_ip', 'dst_ip'])
csv_df = csv_df.set_index(['src_ip', 'dst_ip'])
path_to_logg_file = path_to_detailed_labels + "/" + scenario + "/bro/conn.log.labeled"
zat = LogToDataFrame()
bro_original_df = zat.create_dataframe(path_to_logg_file)
bro_original_df["label"] = bro_original_df["tunnel_parents label detailed-label"].apply(
lambda x: x.split(" ")[1].strip())
bro_original_df["detailed_label"] = bro_original_df["tunnel_parents label detailed-label"].apply(
lambda x: x.split(" ")[2].strip())
bro_original_df = bro_original_df.rename(columns={"id.orig_h": "src_ip", "id.resp_h": "dst_ip"})
bro_original_df = bro_original_df.drop(
columns=['uid', 'id.orig_p', 'id.resp_p', 'proto', 'service', 'duration', 'orig_bytes',
'resp_bytes',
'conn_state', 'local_orig', 'local_resp', 'missed_bytes', 'history', 'orig_pkts',
'orig_ip_bytes',
'resp_pkts', 'resp_ip_bytes', 'tunnel_parents label detailed-label'])
bro_original_df.sort_values(["src_ip", "dst_ip"], inplace=True)
bro_original_df = bro_original_df.groupby(['src_ip', 'dst_ip'])[
'detailed_label'].value_counts().to_frame()
bro_original_df = bro_original_df.rename(columns={"detailed_label": "detailed_label_count"})
bro_original_df = bro_original_df.drop(columns="detailed_label_count")
bro_original_df = bro_original_df.reset_index()
bro_original_df = bro_original_df.sort_values(by=['src_ip', 'dst_ip'])
bro_original_df = bro_original_df.set_index(['src_ip', 'dst_ip'])
merged_df = csv_df.merge(on=['src_ip', 'dst_ip'], right=bro_original_df, how="inner")
merged_df = merged_df.reset_index()
addition_csv_path = path_to_storage + "/" + combined_file_name
merged_df.to_csv(addition_csv_path, index=False)
@staticmethod
def determine_connection_length(path_to_iot_scenarios_folder, folder_to_store):
path_to_iot_scenarios_folder = path_to_iot_scenarios_folder
folder_to_store = folder_to_store
folders = sorted([f.path for f in os.scandir(path_to_iot_scenarios_folder) if f.is_dir()])
for index, folder in enumerate(folders):
scenario_name = str(os.path.basename(folder)).strip()
print("Scenario: " + str(index + 1) + "/" + str(len(folders)))
print("Scenario name: " + scenario_name)
pcap_files = glob.glob(folder + "/*.pcap")
for index_file, pcap_file in enumerate(pcap_files):
file_name = str(os.path.basename(pcap_file)).strip()
path_to_pcap_file = pcap_file
print("File: " + str(index_file + 1) + "/" + str(len(pcap_files)))
print("File name : " + file_name)
summary_csv_path = folder_to_store + "/" + scenario_name + "_" + file_name + "_con_length.csv"
with open(summary_csv_path, 'a', newline='') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
new_row = ["src_ip","dst_ip","length"]
csv_writer.writerow(new_row)
csvfile.close()
appended_packet_counter = 0
connections = {}
write_counter = 1
with PcapReader(path_to_pcap_file) as packets:
for packet_count, packet in enumerate(packets):
if IP in packet:
src_ip = packet[IP].src
dst_ip = packet[IP].dst
if (src_ip, dst_ip) not in connections:
connections[(src_ip, dst_ip)] = 0
appended_packet_counter = appended_packet_counter + 1
old_entry = connections[(src_ip, dst_ip)]
new_entry = old_entry + 1
connections[(src_ip, dst_ip)] = new_entry
if appended_packet_counter == 1500000:
with open(summary_csv_path, 'a', newline='') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for (src_ip, dst_ip), amount in connections.items():
new_line = [src_ip, dst_ip, amount]
csv_writer.writerow(new_line)
csvfile.close()
appended_packet_counter = 0
connections.clear()
appended_packet_counter = 0
print("Write " + str(write_counter) + " Finish")
write_counter = write_counter + 1
packets.close()
if (len(connections) > 0):
print("Write " + str(write_counter))
with open(summary_csv_path, 'a', newline='') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for (src_ip, dst_ip), amount in connections.items():
new_line = [src_ip, dst_ip , amount]
csv_writer.writerow(new_line)
csvfile.close()
connections.clear()
sys.exit()
@staticmethod
def determining_avg_connection_length_per_detailed_label_connection_level(path_to_original_file, path_to_storage):
path_to_original_file = path_to_original_file
path_to_storage = path_to_storage
scenarios = sorted([f.path for f in os.scandir(path_to_original_file) if f.is_dir()])
for scenario_index, scenario_path in enumerate(scenarios):
scenario_name = os.path.basename(scenario_path)
print(scenario_name)
print("Scenario " + str(scenario_index + 1) + "/" + str(len(scenarios)))
print("Loading Logg File")
subfolders = [f.path for f in os.scandir(scenario_path) if f.is_dir()]
for subfolder in subfolders:
if os.path.basename(subfolder) == "bro":
log_file = subfolder + "/conn.log.labeled"
detailed_label_count_file = subfolder + "/detailed_label_conn_level.csv"
detailed_label_df = pd.read_csv(detailed_label_count_file)
zat = LogToDataFrame()
bro_original_df = zat.create_dataframe(log_file)
break
bro_original_df["detailed_label"] = bro_original_df["tunnel_parents label detailed-label"].apply(lambda x: x.split(" ")[2].strip())
bro_original_df = bro_original_df.drop(columns=['uid', 'id.orig_p', 'id.resp_p', 'proto', 'service', 'duration', 'orig_bytes','resp_bytes', 'conn_state', 'local_orig', 'local_resp', 'missed_bytes', 'history','orig_pkts', 'orig_ip_bytes', 'resp_pkts', 'resp_ip_bytes', 'tunnel_parents label detailed-label'])
bro_original_df = bro_original_df.rename(columns={"id.orig_h": "src_ip", "id.resp_h": "dst_ip"})
bro_original_df.sort_values(["src_ip", "dst_ip"], inplace=True)
bro_original_df.set_index(['src_ip', 'dst_ip'])
bro_original_df = bro_original_df.groupby(['src_ip', 'dst_ip'])["detailed_label"].value_counts().to_frame()
bro_original_df = bro_original_df.rename(columns={"detailed_label": "count"}).reset_index().drop(columns="count")
bro_original_df = bro_original_df.sort_values(by=['src_ip', 'dst_ip'])
detailed_label_df["connection_count"] = np.where(detailed_label_df["connection_count"] > 1000, 1000, detailed_label_df["connection_count"])
detailed_label_dic = detailed_label_df.drop(columns="scenario").set_index("detailed_label").to_dict()
print("Reading PCAP File")
pcap_files = glob.glob(scenario_path + "/*.pcap")
for file_index, pcap_file_path in enumerate(pcap_files):
file_name = os.path.basename(pcap_file_path)
print(file_name)
print("File " + str(file_index + 1) + "/" + str(len(pcap_files)))
connection_dic = {}
with PcapReader(pcap_file_path) as packets:
for packet_count, packet in enumerate(packets):
if IP in packet:
src_ip = packet[IP].src
dst_ip = packet[IP].dst
detailed_label = bro_original_df[(bro_original_df["src_ip"] == src_ip) & (bro_original_df["dst_ip"] == dst_ip)]["detailed_label"].values
if len(detailed_label) > 0:
detailed_label = detailed_label[0]
if (src_ip, dst_ip, detailed_label) in connection_dic:
old_value = connection_dic[(src_ip, dst_ip, detailed_label)]
new_value = old_value + 1
connection_dic[(src_ip, dst_ip, detailed_label)] = new_value
else:
still_needed = int(detailed_label_dic["connection_count"][detailed_label])
if still_needed > 0:
new_needed = still_needed - 1
detailed_label_dic["connection_count"][detailed_label] = new_needed
connection_dic[(src_ip, dst_ip, detailed_label)] = 1
packets.close()
if len(connection_dic) > 0:
src_ip_list = []
dst_ip_list = []
detailed_label_list = []
connection_length_list = []
for key, value in connection_dic.items():
src_ip_list.append(key[0])
dst_ip_list.append(key[1])
detailed_label_list.append(key[2])
connection_length_list.append(value)
data = {"src_ip": src_ip_list, "dst_ip": dst_ip_list, "detailed_label" : detailed_label_list, "connection_length": connection_length_list}
final_df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
ratings=pd.read_csv('toy_dataset.csv',index_col=0)
ratings.head()
ratings=ratings.fillna(0)
def standardize(row):
new_row=(row-row.mean())/(row.max()-row.min())
return new_row
ratings_std=ratings.apply(standardize)
ratings_std.head()
item_similarity=cosine_similarity(ratings_std.T)
item_similarity_df=pd.DataFrame(item_similarity,index=ratings.columns,columns=ratings.columns)
item_similarity_df.head()
# recommending movies based on similarity score
def get_movies(movie_name,user_rating):
similar_score=item_similarity_df[movie_name]*user_rating
similar_score=similar_score.sort_values(ascending=False)
return similar_score
print(get_movies("romantic3",1))
action_lover=[("action1",5),("romantic2",1),("romantic3",1)]
similar_movies= | pd.DataFrame() | pandas.DataFrame |
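# Illustrative continuation (a sketch, not part of the dataset row above): an
# item-based recommender would typically collect the similarity-weighted scores
# for every movie the user has rated and rank the summed totals. Column names
# follow the toy_dataset example above; the loop and the final aggregation are
# assumptions, not code from the original snippet.
# similar_movies = pd.DataFrame()
# for movie, rating in action_lover:
#     similar_movies = pd.concat([similar_movies, get_movies(movie, rating)], axis=1)
# print(similar_movies.sum(axis=1).sort_values(ascending=False).head())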
import os
import re
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import rcParams
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as pl
import seaborn as sns
import scanpy as sc
import pandas as pd
from scipy import sparse
import logging
import sys
# ########################################################################### #
# ###################### Set up the logging ################################# #
# ########################################################################### #
L = logging.getLogger(__name__)
log_handler = logging.StreamHandler(sys.stdout)
log_handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
log_handler.setLevel(logging.INFO)
L.addHandler(log_handler)
L.setLevel(logging.INFO)
sc.settings.verbosity = 3 # verbosity: errors (0), warnings (1), info (2), hints (3)
sc.logging.print_versions()
# ########################################################################### #
# ######################## Parse the arguments ############################## #
# ########################################################################### #
parser = argparse.ArgumentParser()
# parser.add_argument("--reduced_dims_matrix_file", default="reduced_dims.tsv.gz", type=str,
# help="File with reduced dimensions")
# parser.add_argument("--barcode_file", default="barcodes.tsv.gz", type=str,
# help="File with the cell barcodes")
parser.add_argument("--anndata", default="anndata.h5ad",
help="The anndata object")
# parser.add_argument("--umap", default="umap.tsv.gz",
# help="The umap coordinates")
# parser.add_argument("--features_file", default="features.tsv.gz", type=str,
# help="File with the feature names")
parser.add_argument("--outdir",default=1, type=str,
help="path to output directory")
parser.add_argument("--cluster_assignments", default=1, type=str,
help="gzipped tsv file with cell cluster assignments")
parser.add_argument("--cluster_colors", default=1, type=str,
help="tsv file with the color palette for the clusters")
# parser.add_argument("--comps", default="1", type=str,
# help="Number of dimensions to include in knn and umap computation")
# parser.add_argument("--k", default=20, type=int,
# help="number of neighbors")
args = parser.parse_args()
# ########################################################################### #
# ############## Create outdir and set results file ######################### #
# ########################################################################### #
# write folder
# results_file = args.outdir + "/" + "paga_anndata.h5ad"
# figures folder
sc.settings.figdir = args.outdir
# Get the color palette
ggplot_palette = [x for x in pd.read_csv(args.cluster_colors,
header=None, sep="\t")[0].values]
ggplot_cmap = ListedColormap(sns.color_palette(ggplot_palette).as_hex())
sc.settings.set_figure_params(dpi=300, dpi_save=300)
# ########################################################################### #
# ############################### Run PAGA ################################## #
# ########################################################################### #
# Read in the anndata object with pre-computed neighbors.
adata = sc.read_h5ad(args.anndata)
# Read and add cluster ids
df = | pd.read_csv(args.cluster_assignments,sep="\t") | pandas.read_csv |
#--------------------------------------------------------------------
# Paper: NEORL: A Framework for NeuroEvolution Optimization with RL
# Section: Script for supplementary materials section 8
# Contact: <NAME> (<EMAIL>)
# Last update: 9/10/2021
#---------------------------------------------------------------------
#--------------------------------------------------------
# Import Packages
#--------------------------------------------------------
from neorl.benchmarks import TSP
from neorl import PPO2, DQN, ACER, ACKTR, A2C
from neorl import MlpPolicy, DQNPolicy
from neorl import RLLogger
import matplotlib.pyplot as plt
import pandas as pd
#--------------------------------------------------------
# TSP Data
#--------------------------------------------------------
def TSP_Data(n_city):
""""
Function provides initial data to construct a TSP enviroment
:param n_city: (int) number of cities, choose either 51 or 100
:return: city_loc_list (list), optimum_tour_city (list), episode_length (int)
"""
if n_city == 51:
#---51 cities
#locations
city_loc_list = [[37,52],[49,49],[52,64],[20,26],[40,30],[21,47],[17,63],[31,62],[52,33],[51,21],[42,41],[31,32],[5,25]\
,[12, 42],[36, 16],[52, 41],[27, 23],[17, 33],[13, 13],[57, 58],[62, 42],[42, 57],[16, 57],[8 ,52],[7 ,38],[27, 68],[30, 48]\
,[43, 67],[58, 48],[58, 27],[37, 69],[38, 46],[46, 10],[61, 33],[62, 63],[63, 69],[32, 22],[45, 35],[59, 15],[5 ,6],[10, 17]\
,[21, 10],[5 ,64],[30, 15],[39, 10],[32, 39],[25, 32],[25, 55],[48, 28],[56, 37],[30, 40]]
#optimal solution for comparison
optimum_tour_city = [1,22,8,26,31,28,3,36,35,20,2,29,21,16,50,34,30,9,49,10,39,33,45,15,44,42,40,19,41,13,25,14,24,43,7,23,48\
,6,27,51,46,12,47,18,4,17,37,5,38,11,32]
#episode length
episode_length = 2
elif n_city == 100:
#---100 cities
city_loc_list = [[-47,2],[49,-21 ],[35,-47 ],[30,-47 ],[-39,-50] ,[-35,-27],[-34,9 ],[-11,-8 ],[32,-44 ],[ 1,35 ],[ 36,37 ]\
,[ 12,37 ],[ 37,36 ],[ -26,-8],[ -21,32],[ -29,13],[ 26,-50],[ -7,-36],[ -34,-2],[ 21,-40],[ -25,46],[ -17,8 ],[ 21,27 ],[ -31,-14]\
,[ -15,-44],[ -33,-34],[ -49,45],[ -40,-1],[ -40,-33],[ -39,-26],[ -17,-16],[ 17,-20],[ 4,-11 ],[ 22,34 ],[ 28,24 ],[ -39,37]\
,[ 25,4 ],[ -35,14],[ 34,-5 ],[ 49,-43],[ 34,-29],[ -4,-50],[ 0,-14 ],[ 48,-25],[ -50,-5],[ -26,0 ],[ -13,21],[ -6,-41],[ 40,-33]\
,[ 12,-48],[ -38,16],[ -26,-38],[ -42,16],[ 13,8 ],[ 4,-8 ],[ -46,-20],[ -25,36],[ 22,21 ],[ 43,-5 ],[ -24,0 ],[ -12,-32],[ 47, 49 ]\
,[ 31,-35],[ 42,13 ],[ -45,-45],[ -48,-14],[ 28,23 ],[ 23,-43],[ 30,-25],[ 25,34 ],[ -7,32 ],[ -48,42],[ 1,-26 ],[ -45,32],[-20,35]\
,[ -12,21],[ -41,-49],[ -35,32],[ -43,44],[ -43,47],[ 27,20 ],[ -8,-9 ],[ 37,-11],[ -18,16],[ -41,43],[ -30,29],[ -31,-19],[48,22 ]\
,[ -45,-19],[ -15,30],[ 10,-8 ],[ 40,-33],[ 20,20 ],[ -22,33],[ 42,-37],[ 0,-8 ],[ -50,11],[ 37,-27],[ 39,-43],[-7,32]]
#optimal solution for comparison
optimum_tour_city = [1,97,53,51,38,16,7,28,19,46,60,22,84,76,47,86,78,36,74,72,27,80,79,85,21,57,94,15,75,90,71,100,10,12,34\
,70,11,13,62,88,64,81,67,35,23,58,93,54,37,39,83,59,2,44,98,41,69,63,49,92,95,40,99,3,9,4,17,68,20,50,42,25,48,18,61,73,32,91,55\
,33,43,96,82,8,31,14,24,87,6,26,52,5,77,65,29,30,89,56,66,45]
#episode length
episode_length = 2
else:
raise ValueError('--error: n_city is not defined, either choose 51 or 100')
return city_loc_list, optimum_tour_city, episode_length
#--------------------------------------------------------
# User Parameters for RL Optimisation
#--------------------------------------------------------
total_steps=500 #total time steps to run all optimizers
n_steps=12 #update frequency for A2C, ACKTR, PPO
n_city=51 #number of cities: choose 51 or 100
#---get some data to initialize the enviroment---
city_locs,optimum_tour,episode_length=TSP_Data(n_city=n_city)
#--------------------------------------------------------
# DQN
#--------------------------------------------------------
#create an environment object from the class
env=TSP(city_loc_list=city_locs, optimum_tour_city=optimum_tour,
episode_length=episode_length, method = 'dqn')
#create a callback function to log data
cb_dqn=RLLogger(check_freq=n_city)
#To activate logger plotter, add following arguments to cb_dqn:
#plot_freq = 51,n_avg_steps=10,pngname='DQN-reward'
#Also applicable to ACER.
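#For illustration only (a sketch based on the comment above; the keyword names
#are taken from that comment and have not been verified against the RLLogger API):
#cb_dqn = RLLogger(check_freq=n_city, plot_freq=51, n_avg_steps=10, pngname='DQN-reward')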
#create a RL object based on the env object
dqn = DQN(DQNPolicy, env=env, seed=1)
#optimise the environment class
dqn.learn(total_timesteps=total_steps*n_city, callback=cb_dqn)
#--------------------------------------------------------
# ACER
#--------------------------------------------------------
env=TSP(city_loc_list=city_locs, optimum_tour_city=optimum_tour,
episode_length=episode_length, method = 'acer')
cb_acer=RLLogger(check_freq=n_city)
acer = ACER(MlpPolicy, env=env, seed=1)
acer.learn(total_timesteps=total_steps*n_city, callback=cb_acer)
#--------------------------------------------------------
# PPO
#--------------------------------------------------------
env=TSP(city_loc_list=city_locs, optimum_tour_city=optimum_tour,
episode_length=episode_length, method = 'ppo')
cb_ppo=RLLogger(check_freq=1)
#To activate logger plotter, add following arguments to cb_ppo:
#plot_freq = 1, n_avg_steps=10, pngname='PPO-reward'
#Also applicable to A2C, ACKTR.
ppo = PPO2(MlpPolicy, env=env, n_steps=n_steps, seed = 1)
ppo.learn(total_timesteps=total_steps, callback=cb_ppo)
#--------------------------------------------------------
# ACKTR
#--------------------------------------------------------
env=TSP(city_loc_list=city_locs, optimum_tour_city=optimum_tour,
episode_length=episode_length, method = 'acktr')
cb_acktr=RLLogger(check_freq=1)
acktr = ACKTR(MlpPolicy, env=env, n_steps=n_steps, seed = 1)
acktr.learn(total_timesteps=total_steps, callback=cb_acktr)
#--------------------------------------------------------
# A2C
#--------------------------------------------------------
env=TSP(city_loc_list=city_locs, optimum_tour_city=optimum_tour,
episode_length=episode_length, method = 'a2c')
cb_a2c=RLLogger(check_freq=1)
a2c = A2C(MlpPolicy, env=env, n_steps=n_steps, seed = 1)
a2c.learn(total_timesteps=total_steps, callback=cb_a2c)
#--------------------------------
#Summary Results
#--------------------------------
print('--------------- DQN results ---------------')
print('The best value of x found:', cb_dqn.xbest)
print('The best value of y found:', cb_dqn.rbest)
print('--------------- ACER results ---------------')
print('The best value of x found:', cb_acer.xbest)
print('The best value of y found:', cb_acer.rbest)
print('--------------- PPO results ---------------')
print('The best value of x found:', cb_ppo.xbest)
print('The best value of y found:', cb_ppo.rbest)
print('--------------- ACKTR results ---------------')
print('The best value of x found:', cb_acktr.xbest)
print('The best value of y found:', cb_acktr.rbest)
print('--------------- A2C results ---------------')
print('The best value of x found:', cb_a2c.xbest)
print('The best value of y found:', cb_a2c.rbest)
#--------------------------------
#Summary Plots
#--------------------------------
log_dqn = pd.DataFrame(cb_dqn.r_hist).cummax(axis = 0).values
log_acer = | pd.DataFrame(cb_acer.r_hist) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# This code is initially based on the Kaggle kernel from <NAME>, which can be found in the following link
# https://www.kaggle.com/neviadomski/how-to-get-to-top-25-with-simple-model-sklearn/notebook
# and the Kaggle kernel from <NAME>, which can be found in the link below
# https://www.kaggle.com/pmarcelino/comprehensive-data-exploration-with-python/notebook
# Also, part of the preprocessing and modelling has been inspired by this kernel from Serigne
# https://www.kaggle.com/serigne/stacked-regressions-top-4-on-leaderboard
# And this kernel from juliencs has been pretty helpful too!
# https://www.kaggle.com/juliencs/a-study-on-regression-applied-to-the-ames-dataset
# Adding needed libraries and reading data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import ensemble, tree, linear_model, preprocessing
from sklearn.preprocessing import LabelEncoder, RobustScaler
from sklearn.linear_model import ElasticNet, Lasso
from sklearn.kernel_ridge import KernelRidge
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, train_test_split, cross_val_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.pipeline import make_pipeline
from sklearn.utils import shuffle
from scipy import stats
from scipy.stats import norm, skew, boxcox
from scipy.special import boxcox1p
import xgboost as xgb
import lightgbm as lgb
import warnings
warnings.filterwarnings('ignore')
# Class AveragingModels
# This class is Serigne's simplest way of stacking the prediction models, by
# averaging them. We use it because it matches the approach used in our latest
# submissions, and it works seamlessly with the rmsle_cv function.
class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin):
def __init__(self, models):
self.models = models
# we define clones of the original models to fit the data in
def fit(self, X, y):
self.models_ = [clone(x) for x in self.models]
# Train cloned base models
for model in self.models_:
model.fit(X, y)
return self
#Now we do the predictions for cloned models and average them
def predict(self, X):
predictions = np.column_stack([
model.predict(X) for model in self.models_
])
return np.mean(predictions, axis=1)
train = pd.read_csv("../../train.csv")
test = pd.read_csv("../../test.csv")
#Save the 'Id' column
train_ID = train['Id']
test_ID = test['Id']
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)
# Visualizing outliers
fig, ax = plt.subplots()
ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
#plt.show()
# Now the outliers can be deleted
train = train.drop(train[(train['GrLivArea'] > 4000) & (train['SalePrice'] < 300000)].index)
#Check the graphic again, making sure there are no outliers left
fig, ax = plt.subplots()
ax.scatter(train['GrLivArea'], train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
#plt.show()
#We use the numpy function log1p which applies log(1+x) to all elements of the column
train["SalePrice"] = np.log1p(train["SalePrice"])
#Check the new distribution
sns.distplot(train['SalePrice'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train['SalePrice'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
#plt.show()
# Splitting to features and labels
train_labels = train.pop('SalePrice')
# Test set does not even have a 'SalePrice' column, so both sets can be concatenated
features = pd.concat([train, test], keys=['train', 'test'])
# Checking for missing data, showing every variable with at least one missing value in train set
total_missing_data = features.isnull().sum().sort_values(ascending=False)
missing_data_percent = (features.isnull().sum()/features.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total_missing_data, missing_data_percent], axis=1, keys=['Total', 'Percent'])
print(missing_data[missing_data['Percent']> 0])
# Deleting non-interesting variables for this case study
features.drop(['Utilities'], axis=1, inplace=True)
# Imputing missing values and transforming certain columns
# Converting OverallCond to str
features.OverallCond = features.OverallCond.astype(str)
# MSSubClass as str
features['MSSubClass'] = features['MSSubClass'].astype(str)
# MSZoning NA in pred. filling with most popular values
features['MSZoning'] = features['MSZoning'].fillna(features['MSZoning'].mode()[0])
# LotFrontage NA filling with median according to its OverallQual value
median = features.groupby('OverallQual')['LotFrontage'].transform('median')
features['LotFrontage'] = features['LotFrontage'].fillna(median)
# Alley NA in all. NA means no access
features['Alley'] = features['Alley'].fillna('NoAccess')
# MasVnrArea NA filling with median according to its OverallQual value
median = features.groupby('OverallQual')['MasVnrArea'].transform('median')
features['MasVnrArea'] = features['MasVnrArea'].fillna(median)
# MasVnrType NA in all. filling with most popular values
features['MasVnrType'] = features['MasVnrType'].fillna(features['MasVnrType'].mode()[0])
# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1, BsmtFinType2
# NA in all. NA means No basement
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
features[col] = features[col].fillna('NoBsmt')
# TotalBsmtSF NA in pred. I suppose NA means 0
features['TotalBsmtSF'] = features['TotalBsmtSF'].fillna(0)
# Electrical NA in pred. filling with most popular values
features['Electrical'] = features['Electrical'].fillna(features['Electrical'].mode()[0])
# KitchenAbvGr to categorical
features['KitchenAbvGr'] = features['KitchenAbvGr'].astype(str)
# KitchenQual NA in pred. filling with most popular values
features['KitchenQual'] = features['KitchenQual'].fillna(features['KitchenQual'].mode()[0])
# FireplaceQu NA in all. NA means No Fireplace
features['FireplaceQu'] = features['FireplaceQu'].fillna('NoFp')
# Garage-like features NA in all. NA means No Garage
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageYrBlt', 'GarageCond'):
features[col] = features[col].fillna('NoGrg')
# GarageCars and GarageArea NA in pred. I suppose NA means 0
for col in ('GarageCars', 'GarageArea'):
features[col] = features[col].fillna(0.0)
# SaleType NA in pred. filling with most popular values
features['SaleType'] = features['SaleType'].fillna(features['SaleType'].mode()[0])
# PoolQC NA in all. NA means No Pool
features['PoolQC'] = features['PoolQC'].fillna('NoPool')
# MiscFeature NA in all. NA means None
features['MiscFeature'] = features['MiscFeature'].fillna('None')
# Fence NA in all. NA means no fence
features['Fence'] = features['Fence'].fillna('NoFence')
# BsmtHalfBath and BsmtFullBath NA means 0
for col in ('BsmtHalfBath', 'BsmtFullBath'):
features[col] = features[col].fillna(0)
# Functional NA means Typ
features['Functional'] = features['Functional'].fillna('Typ')
# NA in Bsmt SF variables means not that type of basement, 0 square feet
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF'):
features[col] = features[col].fillna(0)
# NA in Exterior1st filled with the most common value
features['Exterior1st'] = features['Exterior1st'].fillna(features['Exterior1st'].mode()[0])
# NA in Exterior2nd means No 2nd material
features['Exterior2nd'] = features['Exterior2nd'].fillna('NoExt2nd')
# Year and Month to categorical
features['YrSold'] = features['YrSold'].astype(str)
features['MoSold'] = features['MoSold'].astype(str)
# Adding total sqfootage feature and removing Basement, 1st and 2nd floor features
features['TotalSF'] = features['TotalBsmtSF'] + features['1stFlrSF'] + features['2ndFlrSF']
#features.drop(['TotalBsmtSF', '1stFlrSF', '2ndFlrSF'], axis=1, inplace=True)
# Let's rank those categorical features that can be understood to have an order
# Criterion: give higher ranking to better feature values
features = features.replace({'Street' : {'Grvl':1, 'Pave':2},
'Alley' : {'NoAccess':0, 'Grvl':1, 'Pave':2},
'LotShape' : {'IR3':1, 'IR2':2, 'IR1':3, 'Reg':4},
'LandContour' : {'Low':1, 'HLS':2, 'Bnk':3, 'Lvl':4},
'LotConfig' : {'FR3':1, 'FR2':2, 'CulDSac':3, 'Corner':4, 'Inside':5},
'LandSlope' : {'Gtl':1, 'Mod':2, 'Sev':3},
'HouseStyle' : {'1Story':1, '1.5Fin':2, '1.5Unf':3, '2Story':4, '2.5Fin':5, '2.5Unf':6, 'SFoyer':7, 'SLvl':8},
'ExterQual' : {'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5},
'ExterCond' : {'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5},
'BsmtQual' : {'NoBsmt':0, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5},
'BsmtCond' : {'NoBsmt':0, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5},
'BsmtExposure' : {'NoBsmt':0, 'No':1, 'Mn':2, 'Av':3, 'Gd':4},
'BsmtFinType1' : {'NoBsmt':0, 'Unf':1, 'LwQ':2, 'BLQ':3, 'Rec':4, 'ALQ':5, 'GLQ':6},
'BsmtFinType2' : {'NoBsmt':0, 'Unf':1, 'LwQ':2, 'BLQ':3, 'Rec':4, 'ALQ':5, 'GLQ':6},
'HeatingQC' : {'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5},
'CentralAir' : {'N':0, 'Y':1},
'KitchenQual' : {'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5},
'Functional' : {'Sal':0, 'Sev':1, 'Maj2':2, 'Maj1':3, 'Mod':4, 'Min2':5, 'Min1':6, 'Typ':7},
'FireplaceQu' : {'NoFp':0, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5},
'GarageType' : {'NoGrg':0, 'Detchd':1, 'CarPort':2, 'BuiltIn':3, 'Basment':4, 'Attchd':5, '2Types':6},
'GarageFinish' : {'NoGrg':0, 'Unf':1, 'RFn':2, 'Fin':3},
'GarageQual' : {'NoGrg':0, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5},
'GarageCond' : {'NoGrg':0, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5},
'PavedDrive' : {'N':0, 'P':1, 'Y':2},
'PoolQC' : {'NoPool':0, 'Fa':1, 'TA':2, 'Gd':3, 'Ex':4},
'Fence' : {'NoFence':0, 'MnWw':1, 'MnPrv':2, 'GdWo':3, 'GdPrv':4}
})
###################################################################################################
# Now we try to simplify some of the ranked features, reducing its number of values
features['SimplifiedOverallCond'] = features.OverallCond.replace({1:1, 2:1, 3:1, # bad
4:2, 5:2, 6:2, # average
7:3, 8:3, # good
9:4, 10:4 # excellent
})
features['SimplifiedHouseStyle'] = features.HouseStyle.replace({1:1, # 1 storey houses
2:2, 3:2, # 1.5 storey houses
4:3, # 2 storey houses
5:4, 6:4, # 2.5 storey houses
7:5, 8:5 # splitted houses
})
features['SimplifiedExterQual'] = features.ExterQual.replace({1:1, 2:1, # bad
3:2, 4:2, # good/average
5:3 # excellent
})
features['SimplifiedExterCond'] = features.ExterCond.replace({1:1, 2:1, # bad
3:2, 4:2, # good/average
5:3 # excellent
})
features['SimplifiedBsmtQual'] = features.BsmtQual.replace({1:1, 2:1, # bad, not necessary to check 0 value because will remain 0
3:2, 4:2, # good/average
5:3 # excellent
})
features['SimplifiedBsmtCond'] = features.BsmtCond.replace({1:1, 2:1, # bad
3:2, 4:2, # good/average
5:3 # excellent
})
features['SimplifiedBsmtExposure'] = features.BsmtExposure.replace({1:1, 2:1, # bad
3:2, 4:2 # good
})
features['SimplifiedBsmtFinType1'] = features.BsmtFinType1.replace({1:1, 2:1, 3:1, # bad
4:2, 5:2, # average
6:3 # good
})
features['SimplifiedBsmtFinType2'] = features.BsmtFinType2.replace({1:1, 2:1, 3:1, # bad
4:2, 5:2, # average
6:3 # good
})
features['SimplifiedHeatingQC'] = features.HeatingQC.replace({1:1, 2:1, # bad
3:2, 4:2, # good/average
5:3 # excellent
})
features['SimplifiedKitchenQual'] = features.KitchenQual.replace({1:1, 2:1, # bad
3:2, 4:2, # good/average
5:3 # excellent
})
features['SimplifiedFunctional'] = features.Functional.replace({0:0, 1:0, # really bad
2:1, 3:1, # quite bad
4:2, 5:2, 6:2, # small deductions
7:3 # working fine
})
features['SimplifiedFireplaceQu'] = features.FireplaceQu.replace({1:1, 2:1, # bad
3:2, 4:2, # good/average
5:3 # excellent
})
features['SimplifiedGarageQual'] = features.GarageQual.replace({1:1, 2:1, # bad
3:2, 4:2, # good/average
5:3 # excellent
})
features['SimplifiedGarageCond'] = features.GarageCond.replace({1:1, 2:1, # bad
3:2, 4:2, # good/average
5:3 # excellent
})
features['SimplifiedPoolQC'] = features.PoolQC.replace({1:1, 2:1, # average
3:2, 4:2 # good
})
features['SimplifiedFence'] = features.Fence.replace({1:1, 2:1, # bad
3:2, 4:2 # good
})
# Now, let's combine some features to get newer and cooler features
# Overall Score of the house (and simplified)
features['OverallScore'] = features['OverallQual'] * features['OverallCond']
features['SimplifiedOverallScore'] = features['OverallQual'] * features['SimplifiedOverallCond']
# Overall Score of the garage (and simplified garage)
features['GarageScore'] = features['GarageQual'] * features['GarageCond']
features['SimplifiedGarageScore'] = features['SimplifiedGarageQual'] * features['SimplifiedGarageCond']
# Overall Score of the exterior (and simplified exterior)
features['ExterScore'] = features['ExterQual'] * features['ExterCond']
features['SimplifiedExterScore'] = features['SimplifiedExterQual'] * features['SimplifiedExterCond']
# Overall Score of the pool (and simplified pool)
features['PoolScore'] = features['PoolQC'] * features['PoolArea']
features['SimplifiedPoolScore'] = features['SimplifiedPoolQC'] * features['PoolArea']
# Overall Score of the kitchens (and simplified kitchens)
features['KitchenScore'] = features['KitchenQual'] * features['KitchenAbvGr']
features['SimplifiedKitchenScore'] = features['SimplifiedKitchenQual'] * features['KitchenAbvGr']
# Overall Score of the fireplaces (and simplified fireplaces)
features['FireplaceScore'] = features['FireplaceQu'] * features['Fireplaces']
features['SimplifiedFireplaceScore'] = features['SimplifiedFireplaceQu'] * features['Fireplaces']
# Total number of bathrooms
features['TotalBaths'] = features['FullBath'] + (0.5*features['HalfBath']) + features['BsmtFullBath'] + (0.5*features['BsmtHalfBath'])
###################################################################################################
# Box-cox transformation to most skewed features
numeric_feats = features.dtypes[features.dtypes != 'object'].index
# Check the skew of all numerical features
skewed_feats = features[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features:")
skewness = pd.DataFrame({'Skew' :skewed_feats})
skewness.head(10)
# Box-cox
skewness = skewness[abs(skewness) > 0.75]
print("There are {} skewed numerical features to Box Cox transform\n".format(skewness.shape[0]))
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
features[feat] = boxcox1p(features[feat], lam)
# Label encoding to some categorical features
categorical_features = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
lbl = LabelEncoder()
for col in categorical_features:
lbl.fit(list(features[col].values))
features[col] = lbl.transform(list(features[col].values))
# Getting Dummies
features = pd.get_dummies(features)
# Splitting features
train_features = features.loc['train'].select_dtypes(include=[np.number]).values
test_features = features.loc['test'].select_dtypes(include=[np.number]).values
# Validation function
n_folds = 5
def rmsle_cv(model):
kf = KFold(n_folds, shuffle=True, random_state=101010).get_n_splits(train_features)
rmse= np.sqrt(-cross_val_score(model, train_features, train_labels, scoring="neg_mean_squared_error", cv = kf))
return(rmse)
# Modelling
enet_model = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=101010))
print(enet_model)
#score = rmsle_cv(enet_model)
#print("\nRMSLE: {:.4f} (+/- {:.4f})\n".format(score.mean(), score.std()))
gb_model = ensemble.GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state =101010)
print(gb_model)
#score = rmsle_cv(gb_model)
#print("\nRMSLE: {:.4f} (+/- {:.4f})\n".format(score.mean(), score.std()))
xgb_model = xgb.XGBRegressor(colsample_bytree=0.2, gamma=0.0,
learning_rate=0.05, max_depth=3,
min_child_weight=1.7, n_estimators=2200,
reg_alpha=0.9, reg_lambda=0.6,
subsample=0.5, silent=1, seed=101010)
print(xgb_model)
#score = rmsle_cv(xgb_model)
#print("\nRMSLE: {:.4f} (+/- {:.4f})\n".format(score.mean(), score.std()))
lasso_model = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=101010))
print(lasso_model)
#score = rmsle_cv(lasso_model)
#print("\nRMSLE: {:.4f} (+/- {:.4f})\n".format(score.mean(), score.std()))
krr_model = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
print(krr_model)
#score = rmsle_cv(krr_model)
#print("\nRMSLE: {:.4f} (+/- {:.4f})\n".format(score.mean(), score.std()))
# Now let's check how the averaged models work
averaged_models = AveragingModels(models = (gb_model, xgb_model, enet_model, lasso_model, krr_model))
print("AVERAGED MODELS")
score = rmsle_cv(averaged_models)
print("\nRMSLE: {:.4f} (+/- {:.4f})\n".format(score.mean(), score.std()))
# Getting our SalePrice estimation
averaged_models.fit(train_features, train_labels)
final_labels = np.exp(averaged_models.predict(test_features))
# Saving to CSV
| pd.DataFrame({'Id': test_ID, 'SalePrice': final_labels}) | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
from datetime import datetime
from pandas.util import testing as tm
from pandas import DataFrame, MultiIndex, compat, Series, bdate_range, Index
def test_apply_issues():
# GH 5788
s = """2011.05.16,00:00,1.40893
2011.05.16,01:00,1.40760
2011.05.16,02:00,1.40750
2011.05.16,03:00,1.40649
2011.05.17,02:00,1.40893
2011.05.17,03:00,1.40760
2011.05.17,04:00,1.40750
2011.05.17,05:00,1.40649
2011.05.18,02:00,1.40893
2011.05.18,03:00,1.40760
2011.05.18,04:00,1.40750
2011.05.18,05:00,1.40649"""
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'],
parse_dates=[['date', 'time']])
df = df.set_index('date_time')
expected = df.groupby(df.index.date).idxmax()
result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
tm.assert_frame_equal(result, expected)
# GH 5789
# don't auto coerce dates
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'])
exp_idx = pd.Index(
['2011.05.16', '2011.05.17', '2011.05.18'
], dtype=object, name='date')
expected = Series(['00:00', '02:00', '02:00'], index=exp_idx)
result = df.groupby('date').apply(
lambda x: x['time'][x['value'].idxmax()])
tm.assert_series_equal(result, expected)
def test_apply_trivial():
# GH 20066
# trivial apply: ignore input and return a constant dataframe.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df.iloc[1:], df.iloc[1:]],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df.iloc[1:])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH#20066; function passed into apply "
"returns a DataFrame with the same index "
"as the one to create GroupBy object.",
strict=True)
def test_apply_trivial_fail():
# GH 20066
# trivial apply fails if the constant dataframe has the same index
# with the one used to create GroupBy object.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df, df],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df)
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
"""Data pipeline step 1: put data from various sources into a sqlite3 database.
A collection of one-time use data import scripts that assemble a unified
database from multiple data sources. To be used as a command line program from
the terminal (see the db_import bash script). This is not optimized for
efficiency and might take quite a bit of time and main memory to run. This
whole script isn't very pretty :(
The size, offset and pid arguments allow poor man's parallel processing: run
multiple instances of this script and afterwards merge all output databases
into the main one (done in the db_import bash script). pid governs the start
id for the produced rows, so the individual rows can be merged without
losing the relations between profiles and profiledata. size and offset
are used to select subsets of the input files.
Available data sources:
raso_fwf
Radiosonde data from fixed width format files. These make up the
high-resolution climatology also used by Massaro (2013) and Meyer (2016).
raso_cosmo7
Simulated vertical profiles from COSMO7 in L2E format.
raso_bufr
Radiosonde data from bufr files. These are the additional profiles after
2012 from ERTEL2.
nordkette
Nordkette slope temperature measurements used previously by Meyer (2016).
igmk
Brightness temperature simulations based on the high-resolution
climatology. The used model is MONORTM, data were apparently processed by
the IGMK. These data come from the netcdf files that are the input to the
IDL script used by Massaro (2013) and Meyer (2016) for their regression
retrievals.
hatpro
HATPRO raw data import: BLB and BRT joined with data from the MET files.
description
Adds an information table to the database containing descriptions of the
kind values used in the profiles and hatpro tables.
"""
import argparse, json, os
import datetime as dt
from glob import glob
from operator import itemgetter
from collections import OrderedDict
from toolz import groupby
import numpy as np
import pandas as pd
from db_tools import Database
import formulas as fml
scheme = (
"CREATE TABLE IF NOT EXISTS profiles("
"id INTEGER PRIMARY KEY, "
"kind TEXT, "
"valid NUMERIC, "
"lead NUMERIC, "
"cloudy INTEGER, "
"file TEXT"
"); "
"CREATE TABLE IF NOT EXISTS profiledata("
"id INTEGER PRIMARY KEY, "
"profile INTEGER, "
"p NUMERIC, "
"z NUMERIC, "
"T NUMERIC, "
"Td NUMERIC, "
"qvap NUMERIC, "
"qliq NUMERIC"
"); "
"CREATE TABLE IF NOT EXISTS hatpro("
"id INTEGER PRIMARY KEY, "
"kind TEXT, "
"valid NUMERIC, "
"angle NUMERIC, "
"TB_22240MHz NUMERIC, "
"TB_23040MHz NUMERIC, "
"TB_23840MHz NUMERIC, "
"TB_25440MHz NUMERIC, "
"TB_26240MHz NUMERIC, "
"TB_27840MHz NUMERIC, "
"TB_31400MHz NUMERIC, "
"TB_51260MHz NUMERIC, "
"TB_52280MHz NUMERIC, "
"TB_53860MHz NUMERIC, "
"TB_54940MHz NUMERIC, "
"TB_56660MHz NUMERIC, "
"TB_57300MHz NUMERIC, "
"TB_58000MHz NUMERIC, "
"p NUMERIC, "
"T NUMERIC, "
"qvap NUMERIC, "
"rain INTEGER"
"); "
"CREATE TABLE IF NOT EXISTS nordkette("
"valid NUMERIC PRIMARY KEY, "
"T_710m NUMERIC,"
"T_920m NUMERIC,"
"T_1220m NUMERIC,"
"T_2270m NUMERIC"
"); "
)
def select_files(files):
"""Select a subset of all files according to the specifications in args."""
files = list(sorted(files))
if args.offset is not None:
files = files[int(args.offset):]
if args.size is not None:
files = files[:int(args.size)]
return files
def filename(path):
return path.split("/")[-1]
# Yeah, yeah I should have used os.pathlib... Sorry Windows-people :(
def read_raso_fwf(pid, path):
"""Read a fixed-width format radiosonde file.
These are the ones containing the climatology that was also used by
<NAME> and <NAME>.
"""
colspecs = [(8, 17), (17, 26), (26, 36), (43, 49)]
names = ["p", "z", "T", "Td"]
def errfloat(x):
return None if "/" in x else float(x)
file = filename(path)
valid = (dt.datetime
.strptime(file, "%Y%m%d_%H%M.reduced.txt")
.replace(tzinfo=dt.timezone.utc)
.timestamp()
)
df = pd.read_fwf(path, colspecs=colspecs, names=names,
converters={n: errfloat for n in names},
skiprows=1)
df["T"] = 273.15 + df["T"]
df["Td"] = 273.15 + df["Td"]
ps = pd.Series(np.repeat(pid, len(df)), name="profile")
# Calculate specific humidity and cloud water content
qvap = fml.qvap(df["p"], df["Td"])
qliq = fml.qliq(df["z"], df["p"], df["T"], df["Td"])
data = pd.concat([ps, df, qvap, qliq], axis=1).as_matrix().tolist()
cloudy = 1 if (qliq > 0).any() else 0
return pid, data, valid, cloudy, file
def read_l2e(pid, path):
"""COSMO7 simulated soundings come as l2e files which are a sequence of
headers and fixed-width format tables."""
from l2e import parse as parse_l2e
with open(path, "r") as f:
for valid, run, df in parse_l2e(f, wmonrs=["11120"]):
valid = valid.timestamp()
run = run.timestamp()
lead = valid - run
# Clouds in COSMO7 output are always at 100 % RH
qliq = fml.qliq(df["T"], df["qcloud"])
ps = pd.Series(np.repeat(pid, len(df)), name="profile")
data = pd.concat([ps, df[["p", "z", "T", "Td", "qvap"]], qliq],
axis=1).dropna(axis=0).as_matrix().tolist()
cloudy = 1 if (qliq > 0).any() else 0
yield pid, data, valid, valid-run, cloudy, filename(path)
pid = pid + 1
def read_bufr_group(pid, paths):
"""Additional radiosonde profiles are available form the ertel2 archive
in the form of BUFR files.
/!\ BUFRReader is very (!) slow.
"""
from bufr import BUFRReader
reader = BUFRReader()
for _, path in sorted(paths, key=lambda p: -os.path.getsize(p[1])):
dfs, metadata = reader.read(path)
try: df = dfs[-1][["p", "z", "T", "Td"]]
except KeyError: continue
valid = metadata["datetime"].timestamp()
ps = pd.Series(np.repeat(pid, len(df)), name="profile")
# Calculate specific humidity and cloud water content
qvap = fml.qvap(df["p"], df["Td"])
qliq = fml.qliq(df["z"], df["p"], df["T"], df["Td"])
data = pd.concat([ps, df, qvap, qliq], axis=1).as_matrix().tolist()
cloudy = 1 if (qliq > 0).any() else 0
if cloudy and metadata["cloud cover"] == 0:
print("Warning: {} | No clouds reported but found by RH criterion.".format(filename(path)))
return pid, data, valid, cloudy, filename(path)
def read_netcdf(file):
"""Simulated HATPRO data are available from yearly netcdf files compatible
with the IDL scripts that <NAME> and <NAME> used."""
import xarray
xdata = xarray.open_dataset(file)
substitutions = [
["n_angle", "elevation_angle"],
["n_date", "date"],
["n_frequency", "frequency"],
["n_height", "height_grid"]
]
for origin, target in substitutions:
xdata.coords[origin] = xdata.data_vars[target]
def to_date(d):
return (dt.datetime
.strptime(str(d), "%Y%m%d%H")
.replace(tzinfo=dt.timezone.utc)
.timestamp()
)
df = xdata["brightness_temperatures"].to_dataframe()
df = df.ix[~df.index.duplicated(keep="first")]
ddf = df.unstack(level=[1,2,3])
ddf.columns = ["{:>4}_f{}".format(round(c[2], 1), round(c[3]*1000)) for c in ddf.columns]
angles = set([c.split("_")[0] for c in ddf.columns])
for a in sorted(angles):
data = ddf[list(sorted(c for c in ddf.columns if c.startswith(a)))]
psfc = xdata["atmosphere_pressure_sfc"].to_dataframe()["atmosphere_pressure_sfc"]/100
psfc.name = "p"
Tsfc = xdata["atmosphere_temperature_sfc"].to_dataframe()["atmosphere_temperature_sfc"]
Tsfc.name = "T"
qsfc = xdata["atmosphere_humidity_sfc"].to_dataframe()["atmosphere_humidity_sfc"] / fml.ρ(p=psfc, T=Tsfc, e=0) # approx
qsfc.name = "q"
valid = pd.Series(data.index.map(to_date),
index=data.index, name="valid")
kind = pd.Series(np.repeat("igmk", [len(data)]),
index=data.index, name="kind")
angle = pd.Series(np.repeat(90-float(a), [len(data)]),
index=data.index, name="angle")
precip = pd.Series(np.repeat(None, [len(data)]),
index=data.index, name="rain")
data = pd.concat([kind, valid, angle, data, psfc,
Tsfc, qsfc, precip], axis=1)
yield data
parser = argparse.ArgumentParser()
parser.add_argument("--size", default=None)
parser.add_argument("--offset", default=None)
parser.add_argument("--pid", default=1)
parser.add_argument("--create", action="store_true")
parser.add_argument("data", nargs="?", default="")
parser.add_argument("output")
if __name__ == "__main__":
args = parser.parse_args()
pid = int(args.pid)
db = Database(args.output)
if args.create:
db.execute(scheme)
if args.data == "raso_fwf":
files = select_files(glob("../data/raso_fwf/*.reduced.txt"))
print("{} | {} | reading {} files".format(
args.data, args.output, len(files)))
rows, profiles = [], []
for path in files:
pid, data, valid, cloudy, file = read_raso_fwf(pid, path)
profiles.append([pid, "raso", valid, 0., cloudy, file])
rows.extend(data)
pid = pid + 1
print("{} | {} | writing {} of {} profiles... ".format(
args.data, args.output, len(profiles), len(files)), end="")
# Data is inserted in a dedicated if-block below
if args.data == "raso_cosmo7":
files = select_files(glob("../data/raso_cosmo7/*.l2e"))
print("{} | {} | reading {} files".format(
args.data, args.output, len(files)))
rows, profiles = [], []
for path in files:
for pid, data, valid, lead, cloudy, file in read_l2e(pid, path):
profiles.append([pid, "cosmo7", valid, lead, cloudy, file])
rows.extend(data)
pid = pid + 1
print("{} | {} | writing {} profiles... ".format(
args.data, args.output, len(profiles)), end="")
# Data is inserted in a dedicated if-block below
elif args.data == "raso_bufr":
def get_datestr(path):
file = filename(path)
return file[:8] + file[10:12] if "." in file[:10] else file[:10]
files = [(get_datestr(f), f) for f in glob("../data/raso_bufr/*.bfr")]
groups = groupby(itemgetter(0), (f for f in files if ("309052" in f[1]
or "30905" not in f[1])))
files = select_files(groups.values())
print("{} | {} | reading {} files".format(
args.data, args.output, len(files)))
rows, profiles = [], []
for group in files:
res = read_bufr_group(pid, group)
if res is None: continue
pid, data, valid, cloudy, file = res
profiles.append([pid, "raso", valid, 0., cloudy, file])
rows.extend(data)
pid = pid + 1
print("{} | {} | writing {} profiles... ".format(
args.data, args.output, len(profiles)), end="")
# Data is inserted in a dedicated if-block below
if args.data.startswith("raso"):
query = ("INSERT INTO profiles (id, kind, valid, lead, cloudy, file) "
"VALUES (?, ?, ?, ?, ?, ?);")
db.executemany(query, profiles)
query = ("INSERT INTO profiledata (profile, p, z, T, Td, qvap, qliq) "
"VALUES (?, ?, ?, ?, ?, ?, ?);")
db.executemany(query, rows)
print("done!")
if args.data == "nordkette":
files = [
"Alpenzoo_710m",
"Hungerburg_920m",
"Rastlboden_1220m",
"Hafelekar_2270m"
]
print("nordkette | {} | reading {} files".format(
args.output, len(files)))
def to_date(d):
return (dt.datetime
.strptime(d, "%Y-%m-%d %H:%M:%S")
.replace(tzinfo=dt.timezone.utc)
.timestamp()
)
def temperature(x):
return (None if not -499 < float(x) < 500
else round(float(x)/10 + 273.15, 2))
data = []
for station in files:
df = pd.read_fwf("../data/stations/TEMPIS_{}.txt".format(station),
colspecs=[(2, 21), (22, 30)],
names=["valid", station],
converters={"valid": to_date, station: temperature},
skiprows=[0]).set_index("valid")
data.append(df[station])
rows = pd.concat(data, axis=1).reset_index().as_matrix().tolist()
print("nordkette | {} | writing {} measurements... ".format(
args.output, len(rows)), end="")
query = ("INSERT INTO nordkette (valid, T_710m, T_920m, T_1220m, "
"T_2270m) VALUES (?, ?, ?, ?, ?);")
db.executemany(query, rows)
print("done!")
if args.data == "igmk":
files = glob("../data/hatpro_netcdf/rt*.cdf")
print("{} | {} | reading {} files".format(
args.data, args.output, len(files)))
rows = []
for file in sorted(files):
for df in read_netcdf(file):
rows.extend(df.as_matrix().tolist())
print("{} | {} | writing {} rows... ".format(
args.data, args.output, len(rows)), end="")
query = ("INSERT INTO hatpro (kind, valid, angle, "
"TB_22240MHz, TB_23040MHz, TB_23840MHz, TB_25440MHz, "
"TB_26240MHz, TB_27840MHz, TB_31400MHz, TB_51260MHz, "
"TB_52280MHz, TB_53860MHz, TB_54940MHz, TB_56660MHz, "
"TB_57300MHz, TB_58000MHz, p, T, qvap, rain) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, "
"?, ?, ?, ?, ?, ?, ?);")
db.executemany(query, rows)
print("done!")
if args.data == "hatpro":
import hatpro
def get_df(files, kind=None):
dfs = []
for f in files:
dfs.append(hatpro.read(f))
out = | pd.concat(dfs, axis=0) | pandas.concat |
data_dir = '/mnt/lareaulab/cfbuenabadn/SingleCell/data/'
######################
# load_data_short.py #
######################
print('loading data')
import numpy as np
import pandas as pd
import os
import matplotlib.cm as cm
from matplotlib import pyplot as plt
from scipy import stats as st
import seaborn as sns
import numpy.random as r
import sys
sys.path.insert(0, '/mnt/lareaulab/cfbuenabadn/sc_binary_splicing/utils/')
import splicing_utils as spu
from splicing_utils import *
import single_cell_plots as scp
from single_cell_plots import *
import numpy as np
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
data_dir = '/mnt/lareaulab/cfbuenabadn/SingleCell/data/'
# load PSI tables
chen_PSI = pd.read_csv(data_dir + 'chen/processed_tables/chen.skipped_exons_psi.tab', sep='\t', index_col=0)
# SJ read tables
chen_read_counts = | pd.read_csv(data_dir + 'chen/processed_tables/chen.skipped_exons_SJreads.tab', sep='\t', index_col=0) | pandas.read_csv |
import numpy as np
import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
NaT,
Series,
TimedeltaIndex,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestShift:
@pytest.mark.parametrize(
"ser",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, ser, shift_size):
# GH22397
assert ser.shift(shift_size) is not ser
@pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1min")])
def test_datetime_shift_always_copy(self, move_by_freq):
# GH#22397
ser = Series(range(5), index=date_range("2017", periods=5))
assert ser.shift(freq=move_by_freq) is not ser
def test_shift(self, datetime_series):
shifted = datetime_series.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, datetime_series.index)
tm.assert_index_equal(unshifted.index, datetime_series.index)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_series.values[:-1]
)
offset = BDay()
shifted = datetime_series.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
unshifted = datetime_series.shift(0, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
shifted = datetime_series.shift(1, freq="B")
unshifted = shifted.shift(-1, freq="B")
tm.assert_series_equal(unshifted, datetime_series)
# corner case
unshifted = datetime_series.shift(0)
tm.assert_series_equal(unshifted, datetime_series)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, BDay())
tm.assert_series_equal(shifted2, shifted3)
tm.assert_series_equal(ps, shifted2.shift(-1, "B"))
msg = "Given freq D does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=BDay())
tm.assert_series_equal(shifted5, shifted4)
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
s1 = Series(np.arange(5, dtype=dtype), index=index)
p = s1.iloc[1]
result = s1.shift(periods=p)
expected = Series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_series_equal(result, expected)
# GH#8260
# with tz
s = Series(
date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"
)
result = s - s.shift()
exp = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
tm.assert_series_equal(result, exp)
# incompat tz
s2 = Series(date_range("2000-01-01 09:00:00", periods=5, tz="CET"), name="foo")
msg = "DatetimeArray subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
s - s2
def test_shift2(self):
ts = Series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"])
msg = "Cannot shift with no freq"
with pytest.raises(NullFrequencyError, match=msg):
idx.shift(1)
def test_shift_fill_value(self):
# GH#24128
ts = Series(
[1.0, 2.0, 3.0, 4.0, 5.0], index=date_range("1/1/2000", periods=5, freq="H")
)
exp = Series(
[0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("1/1/2000", periods=5, freq="H")
)
# check that fill value works
result = ts.shift(1, fill_value=0.0)
tm.assert_series_equal(result, exp)
exp = Series(
[0.0, 0.0, 1.0, 2.0, 3.0], index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(2, fill_value=0.0)
tm.assert_series_equal(result, exp)
ts = Series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert res.dtype == ts.dtype
def test_shift_categorical_fill_value(self):
ts = Series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
expected = Series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
)
tm.assert_equal(res, expected)
# check for incorrect fill_value
msg = "'fill_value=f' is not present in this Categorical's categories"
with pytest.raises(TypeError, match=msg):
ts.shift(1, fill_value="f")
def test_shift_dst(self):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
s = Series(dates)
res = s.shift(0)
tm.assert_series_equal(res, s)
assert res.dtype == "datetime64[ns, US/Eastern]"
res = s.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == "datetime64[ns, US/Eastern]"
res = s.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = s.shift(ex)
exp = | Series([NaT] * 10, dtype="datetime64[ns, US/Eastern]") | pandas.Series |
# -*- coding: utf-8 -*-
"""AirBnB_Berlin_V0.2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1WTsUNqr6eNVVVTk92-YE-wsAZD6MD5I8
"""
# Commented out IPython magic to ensure Python compatibility.
## Importing required libraries
import pandas as pd
import numpy as np
from numpy.random import seed
seed(14)
import matplotlib.pyplot as plt
# %matplotlib inline
from datetime import datetime
import seaborn as sns
##import geopandas as gpd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split, cross_val_score
import xgboost as xgb
from xgboost import plot_importance
from sklearn.metrics import explained_variance_score, mean_squared_error, r2_score
import time
from keras import models, layers, optimizers, regularizers
from keras.utils.vis_utils import model_to_dot
from IPython.display import SVG
from statsmodels.tsa.seasonal import seasonal_decompose
## import the data
raw_df = pd.read_csv('/content/drive/My Drive/Colab Notebooks/listings.csv')
## print statement to show number of rows
print(f"The dataset contains {len(raw_df)} Airbnb listings")
## display all the columns
pd.set_option('display.max_columns', len(raw_df.columns))
## see 100 rows when looking at dataframe
| pd.set_option('display.max_rows', 100) | pandas.set_option |
# -*- coding: utf-8 -*-
"""
Utilities for working with related individuals (crosses, families, etc.).
See also the examples at:
- http://nbviewer.ipython.org/github/alimanfoo/anhima/blob/master/examples/ped.ipynb
""" # noqa
from __future__ import division, print_function, absolute_import
# third party dependencies
import numpy as np
import numexpr as ne
import pandas
# internal dependencies
import anhima.gt
# constants to represent inheritance states
INHERIT_UNDETERMINED = 0
INHERIT_PARENT1 = 1
INHERIT_PARENT2 = 2
INHERIT_NONSEG_REF = 3
INHERIT_NONSEG_ALT = 4
INHERIT_NONPARENTAL = 5
INHERIT_PARENT_MISSING = 6
INHERIT_MISSING = 7
INHERITANCE_STATES = range(8)
INHERITANCE_LABELS = ('undetermined', 'parent1', 'parent2', 'non-seg ref',
'non-seg alt', 'non-parental', 'parent missing',
'missing')
def diploid_inheritance(parent_diplotype, gamete_haplotypes):
"""
Determine the transmission of parental alleles to a set of gametes.
Parameters
----------
parent_diplotype : array_like, shape (n_variants, 2)
An array of phased genotypes for a single diploid individual, where
each element of the array is an integer corresponding to an allele
index (-1 = missing, 0 = reference allele, 1 = first alternate allele,
2 = second alternate allele, etc.).
gamete_haplotypes : array_like, shape (n_variants, n_gametes)
An array of haplotypes for a set of gametes derived from the given
parent, where each element of the array is an integer corresponding
to an allele index (-1 = missing, 0 = reference allele, 1 = first
alternate allele, 2 = second alternate allele, etc.).
Returns
-------
inheritance : ndarray, uint8, shape (n_variants, n_gametes)
An array of integers coding the allelic inheritance, where 1 =
inheritance from first parental haplotype, 2 = inheritance from second
parental haplotype, 3 = inheritance of reference allele from parent
that is homozygous for the reference allele, 4 = inheritance of
alternate allele from parent that is homozygous for the alternate
allele, 5 = non-parental allele, 6 = parental genotype is missing,
7 = gamete allele is missing.
"""
# normalise inputs
parent_diplotype = np.asarray(parent_diplotype)
assert parent_diplotype.ndim == 2
assert parent_diplotype.shape[1] == 2
gamete_haplotypes = np.asarray(gamete_haplotypes)
assert gamete_haplotypes.ndim == 2
# convenience variables
parent1 = parent_diplotype[:, 0, np.newaxis] # noqa
parent2 = parent_diplotype[:, 1, np.newaxis] # noqa
gamete_is_missing = gamete_haplotypes < 0
parent_is_missing = np.any(parent_diplotype < 0, axis=1)
parent_is_hom_ref = anhima.gt.is_hom_ref(parent_diplotype)[:, np.newaxis] # noqa
parent_is_het = anhima.gt.is_het(parent_diplotype)[:, np.newaxis] # noqa
parent_is_hom_alt = anhima.gt.is_hom_alt(parent_diplotype)[:, np.newaxis] # noqa
# need this for broadcasting, but also need to retain original for later
parent_is_missing_bc = parent_is_missing[:, np.newaxis] # noqa
# N.B., use numexpr below where possible to avoid temporary arrays
# utility variable, identify allele calls where inheritance can be
# determined
callable = ne.evaluate('~gamete_is_missing & ~parent_is_missing_bc') # noqa
callable_seg = ne.evaluate('callable & parent_is_het') # noqa
# main inheritance states
inherit_parent1 = ne.evaluate(
'callable_seg & (gamete_haplotypes == parent1)'
)
inherit_parent2 = ne.evaluate(
'callable_seg & (gamete_haplotypes == parent2)'
)
nonseg_ref = ne.evaluate(
'callable & parent_is_hom_ref & (gamete_haplotypes == parent1)'
)
nonseg_alt = ne.evaluate(
'callable & parent_is_hom_alt & (gamete_haplotypes == parent1)'
)
nonparental = ne.evaluate(
'callable & (gamete_haplotypes != parent1)'
' & (gamete_haplotypes != parent2)'
)
# record inheritance states
# N.B., order in which these are set matters
inheritance = np.zeros_like(gamete_haplotypes, dtype='u1')
inheritance[inherit_parent1] = INHERIT_PARENT1
inheritance[inherit_parent2] = INHERIT_PARENT2
inheritance[nonseg_ref] = INHERIT_NONSEG_REF
inheritance[nonseg_alt] = INHERIT_NONSEG_ALT
inheritance[nonparental] = INHERIT_NONPARENTAL
inheritance[parent_is_missing] = INHERIT_PARENT_MISSING
inheritance[gamete_is_missing] = INHERIT_MISSING
return inheritance
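# Minimal sketch of how diploid_inheritance is meant to be called; the arrays below
# are invented purely for illustration and this helper is not part of the original module.
def _example_diploid_inheritance():
    parent = np.array([[0, 1],
                       [0, 1],
                       [1, 1]])   # het, het, hom alt
    gametes = np.array([[0, 1],
                        [1, -1],
                        [1, 0]])  # -1 marks a missing haplotype call
    # rows 0-1 resolve to parent1/parent2/missing; row 2 yields non-seg alt and non-parental
    return diploid_inheritance(parent, gametes)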
def diploid_mendelian_error_biallelic(parental_genotypes, progeny_genotypes):
"""Implementation of function to find Mendelian errors optimised for
biallelic variants.
"""
# recode genotypes for convenience
parental_genotypes_012 = anhima.gt.as_012(parental_genotypes)
progeny_genotypes_012 = anhima.gt.as_012(progeny_genotypes)
# convenience variables
p1 = parental_genotypes_012[:, 0, np.newaxis] # noqa
p2 = parental_genotypes_012[:, 1, np.newaxis] # noqa
o = progeny_genotypes_012 # noqa
# enumerate all possible combinations of Mendel error genotypes
ex = '((p1 == 0) & (p2 == 0) & (o == 1))' \
' + ((p1 == 0) & (p2 == 0) & (o == 2)) * 2' \
' + ((p1 == 2) & (p2 == 2) & (o == 1))' \
' + ((p1 == 2) & (p2 == 2) & (o == 0)) * 2' \
' + ((p1 == 0) & (p2 == 2) & (o == 0))' \
' + ((p1 == 0) & (p2 == 2) & (o == 2))' \
' + ((p1 == 2) & (p2 == 0) & (o == 0))' \
' + ((p1 == 2) & (p2 == 0) & (o == 2))' \
' + ((p1 == 0) & (p2 == 1) & (o == 2))' \
' + ((p1 == 1) & (p2 == 0) & (o == 2))' \
' + ((p1 == 2) & (p2 == 1) & (o == 0))' \
' + ((p1 == 1) & (p2 == 2) & (o == 0))'
errors = ne.evaluate(ex).astype('u1')
return errors
def diploid_mendelian_error_multiallelic(parental_genotypes, progeny_genotypes,
max_allele):
"""Implementation of function to find Mendelian errors generalised for
multiallelic variants.
"""
# transform genotypes into per-call allele counts
alleles = range(max_allele + 1)
p = anhima.gt.as_allele_counts(parental_genotypes, alleles=alleles)
o = anhima.gt.as_allele_counts(progeny_genotypes, alleles=alleles)
# detect nonparental and hemiparental inheritance by comparing allele
# counts between parents and progeny
ps = p.sum(axis=1)[:, np.newaxis] # add allele counts for both parents
ac_diff = (o - ps).astype('i1')
ac_diff[ac_diff < 0] = 0
# sum over all alleles
errors = np.sum(ac_diff, axis=2).astype('u1')
# detect uniparental inheritance by finding cases where no alleles are
# shared between parents, then comparing progeny allele counts to each
# parent
pc1 = p[:, 0, np.newaxis, :]
pc2 = p[:, 1, np.newaxis, :]
# find variants where parents don't share any alleles
is_shared_allele = (pc1 > 0) & (pc2 > 0)
no_shared_alleles = ~np.any(is_shared_allele, axis=2)
# find calls where progeny genotype is identical to one or the other parent
errors[
no_shared_alleles
& (np.all(o == pc1, axis=2)
| np.all(o == pc2, axis=2))
] = 1
# retrofit where either or both parent has a missing call
is_parent_missing = anhima.gt.is_missing(parental_genotypes)
errors[np.any(is_parent_missing, axis=1)] = 0
return errors
def diploid_mendelian_error(parental_genotypes, progeny_genotypes):
"""Find impossible genotypes according to Mendelian inheritance laws.
Parameters
----------
parental_genotypes : array_like, int
An array of shape (n_variants, 2, 2) where each element of the array
is an integer corresponding to an allele index (-1 = missing,
0 = reference allele, 1 = first alternate allele, 2 = second
alternate allele, etc.).
progeny_genotypes : array_like, int
An array of shape (n_variants, n_progeny, 2) where each element of the
array is an integer corresponding to an allele index (-1 = missing,
0 = reference allele, 1 = first alternate allele, 2 = second
alternate allele, etc.).
Returns
-------
errors : ndarray, uint8
An array of shape (n_variants, n_progeny) where each element counts
the number of non-Mendelian alleles in a progeny genotype call.
See Also
--------
count_diploid_mendelian_error
Notes
-----
Not applicable to polyploid genotype calls.
Applicable to multiallelic variants.
Assumes that genotypes are unphased.
"""
# check inputs
parental_genotypes = np.asarray(parental_genotypes)
progeny_genotypes = np.asarray(progeny_genotypes)
assert parental_genotypes.ndim == 3
assert progeny_genotypes.ndim == 3
# check the number of variants is equal in parents and progeny
assert parental_genotypes.shape[0] == progeny_genotypes.shape[0]
# check the number of parents
assert parental_genotypes.shape[1] == 2
# check the ploidy
assert parental_genotypes.shape[2] == progeny_genotypes.shape[2] == 2
# determine which implementation to use
max_allele = max(np.amax(parental_genotypes), np.amax(progeny_genotypes))
if max_allele < 2:
errors = diploid_mendelian_error_biallelic(parental_genotypes,
progeny_genotypes)
else:
errors = diploid_mendelian_error_multiallelic(parental_genotypes,
progeny_genotypes,
max_allele)
return errors
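# Quick illustrative check of diploid_mendelian_error; the genotypes are made up for the
# sketch and this helper is not part of the original module.
def _example_diploid_mendelian_error():
    parents = np.array([[[0, 0], [0, 0]]])   # both parents hom ref at a single variant
    progeny = np.array([[[0, 0], [0, 1]]])   # second progeny carries an impossible alt allele
    return diploid_mendelian_error(parents, progeny)  # -> array([[0, 1]], dtype=uint8)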
def count_diploid_mendelian_error(parental_genotypes,
progeny_genotypes,
axis=None):
"""Count impossible genotypes according to Mendelian inheritance laws,
summed over all progeny genotypes, or summed along variants or samples.
Parameters
----------
parental_genotypes : array_like, int
An array of shape (n_variants, 2, 2) where each element of the array
is an integer corresponding to an allele index (-1 = missing,
0 = reference allele, 1 = first alternate allele, 2 = second
alternate allele, etc.).
progeny_genotypes : array_like, int
An array of shape (n_variants, n_progeny, 2) where each element of the
array is an integer corresponding to an allele index (-1 = missing,
0 = reference allele, 1 = first alternate allele, 2 = second
alternate allele, etc.).
axis : int, optional
The axis along which to count (0 = variants, 1 = samples).
Returns
-------
n : int or array
If `axis` is None, returns the total number of Mendelian errors. If
`axis` is specified, returns the sum along the given `axis`.
See Also
--------
diploid_mendelian_error
"""
# sum errors
n = np.sum(diploid_mendelian_error(parental_genotypes,
progeny_genotypes),
axis=axis)
return n
def impute_inheritance_nearest(inheritance, pos, pos_impute):
"""Impute inheritance at unknown positions, by copying from
nearest neighbouring position where inheritance is known.
Parameters
----------
inheritance : array_like, int, shape (n_variants, n_gametes)
An array of integers coding the allelic inheritance state at the
known positions.
pos : array_like, int, shape (n_variants,)
Array of genomic positions at which `inheritance` was determined.
pos_impute : array_like, int
Array of positions at which to impute inheritance.
Returns
-------
imputed_inheritance : ndarray, int
An array of integers coding the imputed allelic inheritance.
"""
# check inputs
inheritance = np.asarray(inheritance)
assert inheritance.ndim == 2
pos = np.asarray(pos)
assert pos.ndim == 1
pos_impute = np.asarray(pos_impute)
assert pos_impute.ndim == 1
n_variants = pos.size
assert inheritance.shape[0] == n_variants
# find indices of neighbouring variants
indices_left = np.clip(np.searchsorted(pos, pos_impute), 0, n_variants - 1)
indices_right = np.clip(indices_left + 1, 0, n_variants - 1)
inh_left = np.take(inheritance, indices_left, axis=0)
inh_right = np.take(inheritance, indices_right, axis=0)
# find positions of neighbouring variants
pos_left = np.take(pos, indices_left)
pos_right = np.take(pos, indices_right)
# compute distance to neighbours
dist_left = np.abs(pos_impute - pos_left)
dist_right = np.abs(pos_right - pos_impute)
# build output
out = np.zeros_like(inh_left)
out[dist_left < dist_right] = inh_left[dist_left < dist_right]
out[dist_left > dist_right] = inh_right[dist_left > dist_right]
# # use neighbour from other side where missing
# override_left = ((dist_left < dist_right)[:, np.newaxis]
# & (out == INHERIT_MISSING))
# out[override_left] = inh_right[override_left]
# override_right = ((dist_left > dist_right)[:, np.newaxis]
# & (out == INHERIT_MISSING))
# out[override_right] = inh_left[override_right]
return out
def tabulate_inheritance_switches(inheritance, pos, gametes=None):
"""Tabulate switches in inheritance.
Parameters
----------
inheritance : array_like, int, shape (n_variants, n_gametes)
An array of integers coding the allelic inheritance state at the
known positions.
pos : array_like, int, shape (n_variants,)
Array of genomic positions at which `inheritance` was determined.
gametes : sequence, length (n_gametes), optional
Returns
-------
df : DataFrame
A table of all inheritance switches observed in the gametes,
where each row corresponds to a switch from one parental allele to
another. The table has a hierarchical index, where the first level
corresponds to the gamete.
See Also
--------
tabulate_inheritance_blocks
"""
# check inputs
inheritance = np.asarray(inheritance)
assert inheritance.ndim == 2
n_variants, n_gametes = inheritance.shape
pos = np.asarray(pos)
assert pos.ndim == 1
assert pos.size == n_variants
if gametes is None:
gametes = np.arange(n_gametes)
else:
gametes = np.asarray(gametes)
assert gametes.ndim == 1
assert gametes.size == n_gametes
states = INHERIT_PARENT1, INHERIT_PARENT2
dfs = [anhima.util.tabulate_state_transitions(inheritance[:, i],
states,
pos)
for i in range(n_gametes)]
df = pandas.concat(dfs, keys=gametes)
return df
def tabulate_inheritance_blocks(inheritance, pos, gametes=None):
"""Tabulate inheritance blocks.
Parameters
----------
inheritance : array_like, int, shape (n_variants, n_gametes)
An array of integers coding the allelic inheritance state at the
known positions.
pos : array_like, int, shape (n_variants,)
Array of genomic positions at which `inheritance` was determined.
gametes : sequence, length (n_gametes), optional
Returns
-------
df : DataFrame
A table of all inheritance blocks observed in the gametes,
where each row corresponds to a block of inheritance from a single
parent. The table has a hierarchical index, where the first level
corresponds to the gamete.
See Also
--------
tabulate_inheritance_switches
"""
# check inputs
inheritance = np.asarray(inheritance)
assert inheritance.ndim == 2
n_variants, n_gametes = inheritance.shape
pos = np.asarray(pos)
assert pos.ndim == 1
assert pos.size == n_variants
if gametes is None:
gametes = np.arange(n_gametes)
else:
gametes = np.asarray(gametes)
assert gametes.ndim == 1
assert gametes.size == n_gametes
states = INHERIT_PARENT1, INHERIT_PARENT2
dfs = [anhima.util.tabulate_state_blocks(inheritance[:, i],
states,
pos)
for i in range(n_gametes)]
df = | pandas.concat(dfs, keys=gametes) | pandas.concat |
"""
try to classify reddit posts.
"""
import os
import glob
from collections import defaultdict
from pprint import pprint
import time
from datetime import datetime
import pandas as pd
from sklearn_pandas import DataFrameMapper, cross_val_score
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, LabelBinarizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_selection import SelectKBest, mutual_info_classif, f_classif
from sklearn.metrics import classification_report, precision_recall_curve
from sklearn.model_selection import cross_validate, KFold, train_test_split, GridSearchCV
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.svm import LinearSVC, SVC
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
def print_topk(k, feature_names, clf):
"""Prints features with the highest coefficient values, per class"""
topk = np.argsort(clf.coef_[0])[-k:]
print(
"{}".format(
" ".join(feature_names[j] for j in topk[::-1])
)
)
SCORES = [
'accuracy',
'roc_auc',
#'recall',
#'precision',
]
def run_experiment(X, y, max_features, feature_selector, args):
long_precision_recall_row_dicts = []
long_result_row_dicts = []
algo_to_score = defaultdict(dict)
clf_sets = []
C_vals = [0.1, 1, 10, 100]
for C_val in C_vals:
clf_sets += [
(LogisticRegression(C=C_val), 'logistic__c={}'.format(C_val), 'logistic', C_val, 'default'),
]
clf_sets += [
(LinearSVC(C=C_val), 'linearsvc__c={}'.format(C_val), 'linearsvc', C_val, 'default'),
]
if args.other_class_weights:
clf_sets += [
(
LogisticRegression(C=C_val, class_weight='balanced'),
'logistic__c={}__class_weight=balanced'.format(C_val),
'logistic',
C_val, 'balanced'
),
(
LogisticRegression(C=C_val, class_weight={0: 1, 1:50}), 'logistic__c={}__class_weight=50x'.format(C_val),
'logistic',
C_val, '50x'
),
(
LinearSVC(C=C_val, class_weight='balanced'), 'linearsvc__c={}__class_weight=balanced'.format(C_val),
'linearsvc',
C_val, 'balanced'
),
(
LinearSVC(C=C_val, class_weight={0: 1, 1:50}), 'linearsvc__c={}__class_weight=500x'.format(C_val),
'linearsvc',
C_val, '50x'
),
]
clf_sets += [
(DummyClassifier(strategy='most_frequent'), 'SelectNoSentences', 'SelectNoSentences', 0.1, 'default'),
#(DummyClassifier(strategy='constant', constant=1), 'SelectEverySentence',),
]
if args.data_dir == 'psa_research':
clf_sets += [
(DecisionTreeClassifier(), 'tree', 'tree', 0.1, 'default'),
(GaussianNB(), 'GaussianNb', 'GaussianNb', 0.1, 'default'),
(KNeighborsClassifier(3), '3nn', '3nn', 0.1, 'default'),
]
for clf, name, algo_name, C_val, weights in clf_sets:
# if name == 'logistic':
# clf.fit(X, data.has_citation)
# print_topk(10, mapper.transformed_names_, clf)
cv = KFold(n_splits=5, shuffle=True, random_state=0)
start = time.time()
scores = cross_validate(
clf, X=X, y=y, cv=cv,
scoring=SCORES)
ret = {}
for key, val in scores.items():
if 'test_' in key:
score = key.replace('test_', '')
ret[score] = np.mean(val)
ret[score + '_std'] = np.std(val)
algo_to_score[name] = ret
tic = round(time.time() - start, 3)
algo_to_score[name]['time'] = tic
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0)
cal_clf = CalibratedClassifierCV(clf)
cal_clf.fit(X_train, y_train)
y_proba = cal_clf.predict_proba(X_test)
precision, recall, thresholds = precision_recall_curve(
y_test, y_proba[:,1])
long_result_row_dict = {
'name': name,
'algo_name': algo_name,
'max_features': max_features,
'feature_selector': feature_selector.__name__,
'C_val': C_val,
'weights': weights,
'time': tic,
}
for score in SCORES:
long_result_row_dict[score] = algo_to_score[name][score]
long_result_row_dict[score + '_std'] = algo_to_score[name][score + '_std']
long_result_row_dicts.append(long_result_row_dict)
for precision_val, recall_val in zip(precision, recall):
long_precision_recall_row_dicts.append({
'precision': precision_val,
'recall': recall_val,
'name': name,
'max_features': max_features,
'feature_selector': feature_selector.__name__,
'algo_name': algo_name,
'C_val': C_val,
'weights': weights,
})
#print(name, tic)
result_df = pd.DataFrame(algo_to_score)
result_df.to_csv('results/{}/{}_{}.csv'.format(
args.data_dir,
max_features,
feature_selector.__name__,
))
maxes = {}
for key in SCORES:
max_idx = result_df.loc[key].idxmax()
max_val = result_df.loc[key, max_idx]
maxes[key] = [max_val, max_idx]
#print(maxes)
results_df = pd.DataFrame(long_result_row_dicts)
long_precision_recall_df = | pd.DataFrame(long_precision_recall_row_dicts) | pandas.DataFrame |
from azul import price_manager_registry, BasePriceManager
import pyEX
import pandas as pd
import pytz
from datetime import datetime, timedelta
from typing import Tuple
import logbook
log = logbook.Logger('IEXPriceManager')
@price_manager_registry.register('iex')
class IEXPriceManager(BasePriceManager):
def __init__(self):
super().__init__()
def _validated_start_and_end_dates(
self,
start_date: datetime,
end_date: datetime
) -> Tuple[datetime, datetime]:
"""
Creates valid start and end dates. Ensures start date is no greater than 30 calendar days ago (IEX limitation).
Args:
start_date (datetime): The start date.
end_date (datetime): The end date.
Returns:
start_date (datetime): The validated start date.
end_date (datetime): The validated end date.
"""
today = datetime.today()
thirty_days_ago = today - timedelta(days=30)
# Ensure the start (and end) date are not greater than 30 days ago (the limitation on minute data).
if start_date is not None:
if (today - start_date).days > 30:
start_date = thirty_days_ago
log.info('IEX start date can be at most 30 calendar days ago. Resetting to: %s' % start_date.date())
if end_date is not None:
if (today - end_date).days > 30:
log.info('IEX end date can be at most 30 calendar days ago. Resetting to: %s' % today.date())
end_date = today
if start_date is None:
start_date = thirty_days_ago
if end_date is None:
end_date = today
# Sanity check the results.
if start_date > end_date:
temp_date = start_date
start_date = end_date
end_date = temp_date
return start_date, end_date
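# Illustrative behaviour of the validation above (dates invented for the example):
#   pm = IEXPriceManager()
#   s, e = pm._validated_start_and_end_dates(datetime.today() - timedelta(days=90), None)
#   # s is clamped to roughly 30 days ago, e defaults to today, and swapped inputs are re-ordered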
def _minute_dataframe_for_date(self, ticker: str, start_timestamp: pd.Timestamp) -> pd.DataFrame:
ret_df = pd.DataFrame()
df = pyEX.chartDF(ticker, timeframe='1d', date=start_timestamp)
if df.empty:
return ret_df
df = df.reset_index()
df['volume'] = df['volume'].astype('int')
df['date'] = df['date'].astype('str')
df['minute'] = df['minute'].astype('str')
df['datet'] = df['date'] + ' ' + df['minute']
df['dividend'] = 0.0
df['split'] = 1.0
df.drop(['date', 'minute', 'average', 'changeOverTime', 'close', 'high', 'label', 'low', 'marketAverage',
'marketChangeOverTime', 'marketNotional', 'marketNumberOfTrades', 'notional', 'numberOfTrades',
'open', 'volume'], axis=1, level=None, inplace=True, errors='ignore')
df.rename(columns={'datet': 'date', 'marketClose': 'close', 'marketHigh': 'high', 'marketLow': 'low',
'marketOpen': 'open', 'marketVolume': 'volume'}, inplace=True)
df.date = pd.to_datetime(df.date, errors='coerce', utc=False, infer_datetime_format=True)
df = df[~df.date.isnull()]
df.set_index('date', drop=True, append=False, inplace=True, verify_integrity=True)
utc = pytz.utc
nytz = pytz.timezone('US/Eastern')
df = df.tz_localize(nytz, axis=0, level=None, copy=False, ambiguous='raise')
df.index = df.index.tz_convert(utc)
if not ( | pd.Series(['close', 'high', 'low', 'open']) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 3 13:44:55 2017
@author: rgryan
#=====================================================================================================================
# This code takes a folder, with subfolders containing .std spectra (outputted from DOASIS), and converts them all
# to line-format files. Each line in the file is one spectrum.
# The line formatting is appropriate for reading in by QDOAS.
# This code has been updated so it handle calibration and direct sun spectra
# It has now been updated SO THAT IT CORRECTLY SUBTRACTS THE OFFSET AND DARK CURRENT SPECTRA.
#=====================================================================================================================
# Updated 03-10-2017,
# For the Broady MAX-DOAS intercomparison campaign
# RGRyan
#=====================================================================================================================
# data save in the following format:
# st_ddmmyyyy_Uxxxxxxx
# where st = spectrum type (sc = scattered light, ds = direct sun, dc = dark current cal, oc = offset cal)
# ddmmyyyy = date
# U (or V) indicates UV (or Visible) spectrum
# xxxxxxx is the 7 digit folder number from DOASIS
# (this is needed for the iteration thru folders, rather than strictly needed for naming purposes)
#=====================================================================================================================
# What needs to be varied?
# 1. the folderpath and folderdate, specific to the main folder you're looking in
# 2. The foldernumber (this needs to be the number of the first subfolder you want the program to go to)
# 3. The lastfolder number (this tells the program when to stop looking and converting)
# 4. The folder letter. Once all the "U"s are converted, then you have to change this in order to convert all
# the "V"s
# 5. Whether you want to do the offset and dark current correction
"""
# Section of things to check or change
#=====================================================================================================================
folderpath = 'C:/Users/rgryan/Google Drive/Documents/PhD/Data/Broady_data_backup/UWollongong/SP2017a/SP1703/'
foldernumber = 0 # The initial subfolder number, where the program will start
lastfolder = 100 # The final subfolder number, after which the program will stop converting
folders0indexed = True # folder numbers starting with zero?
folderletter = 'U' # 'U' for UV spectra, 'V' for visible spectra
correct_dc = True # 'False' turns off the dark current correction
correct_os = True # 'False' turns off the offset correction
only_save_hs_int = True # True if wanting to save an abridged version of the horizon scan data, with only
# intensity values, not all the spectral data
calcCI = True # Calculate colour index?
CIn = 330 # Numerator for color index
CId = 390 # denominator for color index
saveSC = True # save scattered light spectra?
saveDS = False # save direct sun spectra?
saveHS = True # save horizon scan spectra?
saveDC = True # save dark current calibrations?
saveOS = True # save offset calibrations?
saveCI = True # save colour index results?
inst = 'UW' # The data from which institution is being plotted? <just for saving purposes!>
# UM = Uni. of Melb, UW = Wollongong Uni, NZ = NIWA New Zealand,
# BM = Bureau of Meteorology Broadmeadows
# Date format
date_format_1 = True # For date format in UniMelb MS-DOAS STD spectra, MM/DD/YYYY
date_format_2 = False # For date format in UW'gong MS-DOAS STD spectra, DD-Mon-YYYY
# settings for saving
end = ".txt"
path2save = folderpath[3:]+folderletter+'\\'
# Import section
#=====================================================================================================================
import numpy as np
import glob
import pandas as pd
# Section to deal with dark and offset calibration spectra
#=====================================================================================================================
if inst == 'UM':
UVoc__ = 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\UM_calfiles\\UM_UV_offset.std'
visoc__ = 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\UM_calfiles\\UM_vis_offset.std'
UVdc__= 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\UM_calfiles\\UM_UV_darkcurrent.std'
visdc__ = 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\UM_calfiles\\UM_vis_darkcurrent.std'
Uwlcal = 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\UM_calfiles\\UM_UVcal.txt'
Vwlcal = 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\UM_calfiles\\UM_viscal.txt'
elif inst == 'BM':
UVoc__ = 'E:/PhD/Broady_data_backup/TIMTAM_ref_files/BM_calfiles/ofsuv_U0000003.std'
visoc__ = 'E:/PhD/Broady_data_backup/TIMTAM_ref_files/BM_calfiles/ofsvis_V0000003.std'
visdc__ = 'E:/PhD/Broady_data_backup/TIMTAM_ref_files/BM_calfiles/dcvis_V0000005.std'
UVdc__ = 'E:/PhD/Broady_data_backup/TIMTAM_ref_files/BM_calfiles/dcuv_U0000005.std'
Uwlcal = 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\BM_calfiles\\BM_UVcal.txt'
Vwlcal = 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\BM_calfiles\\BM_viscal.txt'
elif inst == 'UW':
UVoc__ = 'E:/PhD/Broady_data_backup/UWollongong/Cals/offset_U_UW.std'
visoc__ = 'E:/PhD/Broady_data_backup/UWollongong/Cals/offset_V_UW.std'
visdc__ = 'E:/PhD/Broady_data_backup/UWollongong/Cals/dc_V_UW.std'
UVdc__ = 'E:/PhD/Broady_data_backup/UWollongong/Cals/dc_U_UW.std'
Uwlcal = 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\UW_calfiles\\UW_UVcal.txt'
Vwlcal = 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\UW_calfiles\\UW_viscal.txt'
elif inst == 'NZ':
UVoc__ = 'E:/PhD/Broady_data_backup/NIWA/NIWA cal files/OFS_U0060764.std'
UVdc__ = 'E:/PhD/Broady_data_backup/NIWA/NIWA cal files/DC_U0060763.std'
visoc__ = 'C:\\Users\\rgryan\\Google Drive\\Documents\\PhD\\Data\\Broady_data_backup\\NIWA\\spectra\\NZ_STD_Spectra_V\\OFS_V0060764.std'
visdc__ = 'C:\\Users\\rgryan\\Google Drive\\Documents\\PhD\\Data\\Broady_data_backup\\NIWA\\spectra\\NZ_STD_Spectra_V\\DC_V0060763.std'
Uwlcal = 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\NZ_calfiles\\NZ_UVcal.txt'
Vwlcal = 'C:\\Users\\rgryan\\Google Drive\\TIMTAM\\NZ_calfiles\\NZ_viscal.txt'
else:
print('Error - Offset or DC cal files not defined')
# Read in Offset calibration for UV and Vis
# ==========================================
UVoc_path = open(UVoc__, 'r')
UVoc_data = UVoc_path.readlines()
UVoc_data_strpd = [(UVoc_data[i].strip('\n')) for i in range(len(UVoc_data))]
visoc_path = open(visoc__, 'r')
visoc_data = visoc_path.readlines()
visoc_data_strpd = [(visoc_data[i].strip('\n')) for i in range(len(visoc_data))]
# Find the data in the offset calibration spectrum
# ==========================================
if folderletter == 'U':
ocCal_ = UVoc_data_strpd[3:2051]
elif folderletter == 'V':
ocCal_ = visoc_data_strpd[3:2051]
ocCal = [float(i) for i in ocCal_]
# Dark current calibration readin
# ==========================================
UVdc_path = open(UVdc__, 'r')
UVdc_data = UVdc_path.readlines()
UVdc_data_strpd = [(UVdc_data[i].strip('\n')) for i in range(len(UVdc_data))]
visdc_path = open(visdc__, 'r')
visdc_data = visdc_path.readlines()
visdc_data_strpd = [(visdc_data[i].strip('\n')) for i in range(len(visdc_data))]
if folderletter == 'U':
dcCal_ = UVdc_data_strpd[3:2051]
elif folderletter == 'V':
dcCal_ = visdc_data_strpd[3:2051]
dcCal = [float(i) for i in dcCal_]
# Find the number of scans and the exposure time for the calibration spectra
#===================================================================
oc_numscans_ = UVoc_data_strpd[2059]
oc_numscansX = oc_numscans_.split()
oc_numscans = float(oc_numscansX[1])
oc_texp_ = UVoc_data_strpd[2072] # time in ms
oc_texpX = oc_texp_.split()
oc_texp = float(oc_texpX[2])
oc_inttime = oc_texp*oc_numscans
dc_numscans_ = UVdc_data_strpd[2059]
dc_numscansX = dc_numscans_.split()
dc_numscans = float(dc_numscansX[1])
dc_texp_ = UVdc_data_strpd[2072] # time in ms
dc_texpX = dc_texp_.split()
dc_texp = float(dc_texpX[2])
dc_inttime = dc_numscans*dc_texp
#===================================================================
# Calibration spectra process
# 1. Offset spectrum is proportional to number of scans. Therefore need to divide by number of scans
if correct_os == True:
ocCal_c1 = [(ocCal[i]/oc_numscans) for i in range(len(ocCal))] # This has units of counts/scan
else:
ocCal_c1 = [(0) for i in range(len(ocCal))]
# 2. Correct dark-current spectrum for the offset
if correct_dc == True:
dcCal_c1 = [(dcCal[i] - ((ocCal_c1[i])*dc_numscans)) for i in range(len(dcCal))] # this has units of counts
dcCal_c = [((dcCal_c1[i]/dc_inttime)) for i in range(len(dcCal))] # this has units of counts/ms
else:
dcCal_c = [(0) for i in range(len(dcCal))]
# 3. Correct offset spectrum using corrected dark current spectrum
if correct_os == True:
ocCal_c2 = [(ocCal[i] - (dcCal_c[i]*oc_inttime)) for i in range(len(ocCal))] # this has units of counts
ocCal_c = [(ocCal_c2[i]/oc_numscans) for i in range(len(ocCal_c2))] # this has units of counts/scan
else:
ocCal_c = [(0) for i in range(len(ocCal))]
# corrected dark current passed to the next stage in units of counts/ms
# corrected offset spectrum passed to the next stage in units of counts/scan
# Create wavelength cal dataframe so we only need to do this once, only to be
# used if Colour Index calculation is performed
if folderletter == 'U':
w = open(Uwlcal, 'r')
else:
w = open(Vwlcal, 'r')
wl_data = w.readlines()
wl_data_strpd = []
for i in range(len(wl_data)):
wl_data_strpd.append(wl_data[i].strip('\n'))
#%%
lastfolderplus1 = lastfolder+1
while foldernumber < lastfolderplus1:
# Empty lists and data frames to write to;
sc_list = [] # for scattered light measurements
ds_list = [] # for direct sun measurements
dc_list = [] # for dark current calibration measurements
oc_list = [] # for offset calibration measurements
hs_list = [] # for horizon scan measurements
ci_list = []
sc_frame_to_fill = pd.DataFrame()
ds_frame_to_fill = pd.DataFrame()
oc_frame_to_fill = pd.DataFrame()
dc_frame_to_fill = | pd.DataFrame() | pandas.DataFrame |
import unittest
import os
import tempfile
from collections import namedtuple
from blotter import blotter
from pandas.util.testing import assert_frame_equal, assert_series_equal, \
assert_dict_equal
import pandas as pd
import numpy as np
class TestBlotter(unittest.TestCase):
def setUp(self):
cdir = os.path.dirname(__file__)
self.prices = os.path.join(cdir, 'data/prices')
self.rates = os.path.join(cdir, 'data/rates/daily_interest_rates.csv')
self.log = os.path.join(cdir, 'data/events.log')
self.meta_log = os.path.join(cdir, 'data/meta_data.log')
def tearDown(self):
pass
def assertEventsEqual(self, evs1, evs2):
if len(evs1) != len(evs2):
raise(ValueError("Event lists length mismatch"))
for ev1, ev2 in zip(evs1, evs2):
self.assertEqual(ev1.type, ev2.type)
assert_dict_equal(ev1.data, ev2.data)
def assertEventTypes(self, evs1, evs2):
msg = "Event lists length mismatch\n\nLeft:\n%s \nRight:\n%s"
left_msg = ""
for ev in evs1:
left_msg += str(ev) + "\n"
right_msg = ""
for ev in evs2:
right_msg += ev.type + "\n"
msg = msg % (left_msg, right_msg)
if len(evs1) != len(evs2):
raise(ValueError(msg))
for ev1, ev2 in zip(evs1, evs2):
if ev1.type is not ev2.type:
raise(ValueError(msg))
def assertDictDataFrameEqual(self, dict1, dict2):
self.assertEqual(dict1.keys(), dict2.keys())
for key in dict1.keys():
try:
assert_frame_equal(dict1[key], dict2[key])
except AssertionError as e:
e.args = (("\nfor key %s\n" % key) + e.args[0],)
raise e
def make_blotter(self):
blt = blotter.Blotter(self.prices, self.rates)
return blt
def test_get_actions(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-04T10:30")
new_ts = pd.Timestamp("2017-01-06T10:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-05T16:00"),
pd.Timestamp("2017-01-05T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_get_actions_weekend_filter(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-06T10:30")
new_ts = pd.Timestamp("2017-01-09T16:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-09T16:00"),
pd.Timestamp("2017-01-09T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_trade_undefined_instrument(self):
blt = self.make_blotter()
ts = pd.Timestamp('2016-12-10T08:30:00')
instr = 'CLZ6'
qty = 1
price = 48.56
def make_trade():
blt._trade(ts, instr, qty, price)
self.assertRaises(KeyError, make_trade)
def test_get_meta_data(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
# currency of instrument defaults to base ccy of blotter when not given
blt.define_generic("CL", margin=0.1, multiplier=100, commission=2.5,
isFX=False)
meta = namedtuple('metadata', ['ccy', 'margin', 'multiplier',
'commission', 'isFX'])
metadata_exp = meta("USD", 0.1, 100, 2.5, False)
metadata = blt._gnrc_meta["CL"]
self.assertEqual(metadata, metadata_exp)
def test_get_holdings_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
hlds = blt.get_holdings_value(ts)
assert_series_equal(hlds, pd.Series())
def test_get_holdings_value_no_fx_conversion(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("SXM", "ZAR", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, 'SXMZ15', qty, price)
def no_fx():
return blt.get_holdings_value(ts)
self.assertRaises(KeyError, no_fx)
def test_get_holdings_timestamp_before(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-05T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-04T00:00:00')
def get_holdings():
blt.get_holdings_value(ts)
self.assertRaises(ValueError, get_holdings)
def test_get_holdings_base_ccy(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([2082.73 * 100], index=['ESZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_AUD_instr_AUDUSD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'APZ15'
qty = 1
price = 5200
blt.define_generic("AP", "AUD", 0.1, 1, 2.5)
blt.map_instrument("AP", "APZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([5283 * 0.73457], index=['APZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_CAD_instr_USDCAD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'SXMZ15'
qty = 1
price = 802.52
blt.define_generic("SXM", "CAD", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([795.95 / 1.3183], index=['SXMZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_instruments_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
instrs = blt.get_instruments()
assert_series_equal(instrs, pd.Series())
def test_get_instruments_multiplier(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
instrs = blt.get_instruments()
instrs_exp = pd.Series([qty], index=['ESZ15'])
assert_series_equal(instrs, instrs_exp)
def test_get_instruments_two_ccy(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr1 = 'ESZ15'
instr2 = 'CLZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt.define_generic("CL", "CAD", 0.1, 1, 2.5)
blt.map_instrument("CL", "CLZ15")
blt._trade(ts, instr1, qty, price)
blt._trade(ts, instr2, qty, price)
instrs = blt.get_instruments()
instrs_exp = | pd.Series([qty, qty], index=['CLZ15', 'ESZ15']) | pandas.Series |
import pandas as pd
import os
import matplotlib.pyplot as plt
import random
import numpy as np
def countChannelsInBarcodeList(path_to_decoded_genes: str):
'''
This function focuses on all stats that are purely based on how many times a certain channel was called, in what round.
This can be useful in debugging certain weird decoding behaviour, like finding whether a channel is overexpressed.
'''
df = pd.read_csv(path_to_decoded_genes)
barcodes = list(df['Barcode'])
# resulting df will have the following columns:
# 'round_nr', 'channel_nr', 'count'
total_channel_counts = {}
channel_dict = {} # takes tuples of round/channel as key, and number of times encountered as value
for element in barcodes:
element_list = [int(digit) for digit in str(element)]
for i,channel_nr in enumerate(element_list):
round_nr = i+1 # Because enumerating starts with 0
# increment the tuple combination fo round/channel with one
channel_dict[(round_nr,channel_nr)] = channel_dict.get((round_nr,channel_nr), 0) + 1
total_channel_counts[channel_nr] = total_channel_counts.get(channel_nr, 0) + 1
rows_list = []
col_names = ['round_nr', 'channel_nr', 'count']
#grouped_by_channel_dict = {}
# Create the rows of the dataframe by representing each row as a dict
for k,count in channel_dict.items():
temp_dict = {}
round_nr, channel_nr = k
row_values = [round_nr, channel_nr, count]
temp_dict = {col_names[i]: row_values[i] for i in range(0,len(col_names)) }
rows_list.append(temp_dict)
count_df = | pd.DataFrame(rows_list) | pandas.DataFrame |
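# Hypothetical usage of countChannelsInBarcodeList (assumes the function goes on to return
# the assembled count DataFrame; the CSV path is a placeholder):
#   counts = countChannelsInBarcodeList("decoded_genes.csv")
#   # one row per (round_nr, channel_nr) with the number of times that channel was called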
"""
Module for legacy LEAP dataset.
"""
import json
import os
import numpy as np
import pandas as pd
from typing import List
from sleap.util import json_loads
from sleap.io.video import Video
from sleap.instance import (
LabeledFrame,
PredictedPoint,
PredictedInstance,
Track,
Point,
Instance,
)
from sleap.skeleton import Skeleton
def load_predicted_labels_json_old(
data_path: str,
parsed_json: dict = None,
adjust_matlab_indexing: bool = True,
fix_rel_paths: bool = True,
) -> List[LabeledFrame]:
"""
Load predicted instances from Talmo's old JSON format.
Args:
data_path: The path to the JSON file.
parsed_json: The parsed json if already loaded, so we can save
some time if already parsed.
adjust_matlab_indexing: Whether to adjust indexing from MATLAB.
fix_rel_paths: Whether to fix paths to videos to absolute paths.
Returns:
List of :class:`LabeledFrame` objects.
"""
if parsed_json is None:
data = json.loads(open(data_path).read())
else:
data = parsed_json
videos = pd.DataFrame(data["videos"])
predicted_instances = pd.DataFrame(data["predicted_instances"])
predicted_points = pd.DataFrame(data["predicted_points"])
if adjust_matlab_indexing:
predicted_instances.frameIdx -= 1
predicted_points.frameIdx -= 1
predicted_points.node -= 1
predicted_points.x -= 1
predicted_points.y -= 1
skeleton = Skeleton()
skeleton.add_nodes(data["skeleton"]["nodeNames"])
edges = data["skeleton"]["edges"]
if adjust_matlab_indexing:
edges = np.array(edges) - 1
for (src_idx, dst_idx) in edges:
skeleton.add_edge(
data["skeleton"]["nodeNames"][src_idx],
data["skeleton"]["nodeNames"][dst_idx],
)
if fix_rel_paths:
for i, row in videos.iterrows():
p = row.filepath
if not os.path.exists(p):
p = os.path.join(os.path.dirname(data_path), p)
if os.path.exists(p):
videos.at[i, "filepath"] = p
# Make the video objects
video_objects = {}
for i, row in videos.iterrows():
if videos.at[i, "format"] == "media":
vid = Video.from_media(videos.at[i, "filepath"])
else:
vid = Video.from_hdf5(
filename=videos.at[i, "filepath"], dataset=videos.at[i, "dataset"]
)
video_objects[videos.at[i, "id"]] = vid
track_ids = predicted_instances["trackId"].values
unique_track_ids = np.unique(track_ids)
spawned_on = {
track_id: predicted_instances.loc[predicted_instances["trackId"] == track_id][
"frameIdx"
].values[0]
for track_id in unique_track_ids
}
tracks = {
i: Track(name=str(i), spawned_on=spawned_on[i])
for i in np.unique(predicted_instances["trackId"].values).tolist()
}
# A function to get all the instances for a particular video frame
def get_frame_predicted_instances(video_id, frame_idx):
points = predicted_points
is_in_frame = (points["videoId"] == video_id) & (
points["frameIdx"] == frame_idx
)
if not is_in_frame.any():
return []
instances = []
frame_instance_ids = np.unique(points["instanceId"][is_in_frame])
for i, instance_id in enumerate(frame_instance_ids):
is_instance = is_in_frame & (points["instanceId"] == instance_id)
track_id = predicted_instances.loc[
predicted_instances["id"] == instance_id
]["trackId"].values[0]
match_score = predicted_instances.loc[
predicted_instances["id"] == instance_id
]["matching_score"].values[0]
track_score = predicted_instances.loc[
predicted_instances["id"] == instance_id
]["tracking_score"].values[0]
instance_points = {
data["skeleton"]["nodeNames"][n]: PredictedPoint(
x, y, visible=v, score=confidence
)
for x, y, n, v, confidence in zip(
*[
points[k][is_instance]
for k in ["x", "y", "node", "visible", "confidence"]
]
)
}
instance = PredictedInstance(
skeleton=skeleton,
points=instance_points,
track=tracks[track_id],
score=match_score,
)
instances.append(instance)
return instances
# Get the unique labeled frames and construct a list of LabeledFrame objects for them.
frame_keys = list(
{
(videoId, frameIdx)
for videoId, frameIdx in zip(
predicted_points["videoId"], predicted_points["frameIdx"]
)
}
)
frame_keys.sort()
labels = []
for videoId, frameIdx in frame_keys:
label = LabeledFrame(
video=video_objects[videoId],
frame_idx=frameIdx,
instances=get_frame_predicted_instances(videoId, frameIdx),
)
labels.append(label)
return labels
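# Hedged usage sketch for the loader above (the JSON path is a placeholder):
#   frames = load_predicted_labels_json_old("old_leap_predictions.json")
#   print(len(frames), "labeled frames loaded")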
def load_labels_json_old(
data_path: str,
parsed_json: dict = None,
adjust_matlab_indexing: bool = True,
fix_rel_paths: bool = True,
) -> List[LabeledFrame]:
"""
Load labeled frames from Talmo's old JSON format.
Args:
data_path: The path to the JSON file.
parsed_json: The parsed json if already loaded, so we can save
some time if already parsed.
adjust_matlab_indexing: Whether to adjust indexing from MATLAB.
fix_rel_paths: Whether to fix paths to videos to absolute paths.
Returns:
A newly constructed Labels object.
"""
if parsed_json is None:
data = json_loads(open(data_path).read())
else:
data = parsed_json
videos = | pd.DataFrame(data["videos"]) | pandas.DataFrame |
'''
BSD 3-Clause License
Copyright (c) 2021, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import pandas as pd
import numpy as np
import geopandas as gpd
from shapely.geometry import Polygon, LineString
import urllib.request
from urllib import parse
import urllib
import json
import re
from .plotmap import read_mapboxtoken
from .coordinates import (
gcj02towgs84,
bd09towgs84,
bd09mctobd09,
wgs84togcj02
)
def getadmin(keyword, ak, jscode='', subdistricts=False):
'''
Input the keyword and the Amap ak. The output is the GIS file of
the administrative boundary (Only in China)
Parameters
-------
keyword : str
The keyword. It might be a city name such as Shenzhen, or
an administrative code such as 440500
ak : str
Amap access token
jscode : str
Amap safety code
subdistricts : bool
Whether to output the information of the administrative district
boundary
Returns
-------
admin : GeoDataFrame
Administrative district (WGS84)
districts : DataFrame
The information of subdistricts. This can be used to further get
the boundary of lower level districts
'''
# API url
url = 'https://restapi.amap.com/v3/config/district?'
# Condition
dict1 = {
'subdistrict': '3',
'showbiz': 'false',
'extensions': 'all',
'key': ak,
'jscode':jscode,
's': 'rsv3',
'output': 'json',
'level': 'district',
'keywords': keyword,
'platform': 'JS',
'logversion': '2.0',
'sdkversion': '1.4.10'
}
url_data = parse.urlencode(dict1)
url = url+url_data
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
webpage = response.read()
result = json.loads(webpage.decode('utf8', 'ignore'))
# Organize Data
datas = []
k = 0
polyline = result['districts'][k]['polyline']
polyline1 = polyline.split('|')
res = []
for polyline2 in polyline1:
polyline2 = polyline2.split(';')
p = []
for i in polyline2:
a, b = i.split(',')
p.append([float(a), float(b)])
x = pd.DataFrame(p)
x[0], x[1] = gcj02towgs84(x[0], x[1])
p = x.values
res.append(Polygon(p))
data = pd.DataFrame()
data1 = pd.DataFrame()
data1['geometry'] = res
data1 = gpd.GeoDataFrame(data1)
poly = data1.unary_union
data['geometry'] = [poly]
try:
data['citycode'] = result['districts'][k]['citycode']
except Exception:
pass
try:
data['adcode'] = result['districts'][k]['adcode']
except Exception:
pass
try:
data['name'] = result['districts'][k]['name']
except Exception:
pass
try:
data['level'] = result['districts'][k]['level']
except Exception:
pass
try:
data['center'] = result['districts'][k]['center']
except Exception:
pass
datas.append(data)
datas = pd.concat(datas)
admin = gpd.GeoDataFrame(datas)
if subdistricts:
        districts = result['districts'][k]['districts']
        districts = pd.DataFrame(districts)
        return admin, districts
    else:
        return admin
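# Illustrative usage sketch (not part of the original module; the key below is
# a placeholder, a real Amap access key is required):
#   >>> admin = getadmin('Shenzhen', ak='your-amap-key')
#   >>> admin, districts = getadmin('440300', ak='your-amap-key', subdistricts=True)
#   >>> admin.plot()   # admin is a GeoDataFrame in WGS84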
import warnings
import numpy as np
import pandas as pd
import xgboost as xgb
import scipy.stats as st
from sklearn.neighbors import BallTree
from xgbse._base import XGBSEBaseEstimator
from xgbse.converters import convert_data_to_xgb_format, convert_y
from xgbse.non_parametric import (
calculate_kaplan_vectorized,
get_time_bins,
calculate_interval_failures,
)
# at which percentiles will the KM predict
KM_PERCENTILES = np.linspace(0, 1, 11)
DEFAULT_PARAMS = {
"objective": "survival:aft",
"eval_metric": "aft-nloglik",
"aft_loss_distribution": "normal",
"aft_loss_distribution_scale": 1,
"tree_method": "hist",
"learning_rate": 5e-2,
"max_depth": 8,
"booster": "dart",
"subsample": 0.5,
"min_child_weight": 50,
"colsample_bynode": 0.5,
}
DEFAULT_PARAMS_TREE = {
"objective": "survival:cox",
"eval_metric": "cox-nloglik",
"tree_method": "exact",
"max_depth": 100,
"booster": "dart",
"subsample": 1.0,
"min_child_weight": 30,
"colsample_bynode": 1.0,
}
# class to turn XGB into a kNN with a kaplan meier in the NNs
class XGBSEKaplanNeighbors(XGBSEBaseEstimator):
"""
Convert xgboost into a nearest neighbor model, where we use hamming distance to define
    similar elements as the ones that co-occurred the most at the ensemble terminal nodes.
Then, at each neighbor-set compute survival estimates with the Kaplan-Meier estimator.
!!! Note
        * We recommend using dart as the booster to prevent any single tree
        from dominating the variance in the ensemble and breaking the leaf co-occurrence similarity logic.
* This method can be very expensive at scales of hundreds of thousands of samples,
due to the nearest neighbor search, both on training (construction of search index) and scoring (actual search).
Read more in [How XGBSE works](https://loft-br.github.io/xgboost-survival-embeddings/how_xgbse_works.html).
"""
def __init__(self, xgb_params=None, n_neighbors=30, radius=None):
"""
Args:
xgb_params (Dict): Parameters for XGBoost model.
If not passed, the following default parameters will be used:
```
DEFAULT_PARAMS = {
"objective": "survival:aft",
"eval_metric": "aft-nloglik",
"aft_loss_distribution": "normal",
"aft_loss_distribution_scale": 1,
"tree_method": "hist",
"learning_rate": 5e-2,
"max_depth": 8,
"booster": "dart",
"subsample": 0.5,
"min_child_weight": 50,
"colsample_bynode": 0.5,
}
```
Check <https://xgboost.readthedocs.io/en/latest/parameter.html> for more options.
n_neighbors (Int): Number of neighbors for computing KM estimates
radius (Float): If set, uses a radius around the point for neighbors search
"""
if xgb_params is None:
xgb_params = DEFAULT_PARAMS
self.xgb_params = xgb_params
self.n_neighbors = n_neighbors
self.radius = radius
self.persist_train = False
self.index_id = None
self.feature_importances_ = None
def fit(
self,
X,
y,
num_boost_round=1000,
validation_data=None,
early_stopping_rounds=None,
verbose_eval=0,
persist_train=True,
index_id=None,
time_bins=None,
):
"""
Transform feature space by fitting a XGBoost model and outputting its leaf indices.
Build search index in the new space to allow nearest neighbor queries at scoring time.
Args:
X ([pd.DataFrame, np.array]): Design matrix to fit XGBoost model
y (structured array(numpy.bool_, numpy.number)): Binary event indicator as first field,
and time of event or time of censoring as second field.
num_boost_round (Int): Number of boosting iterations.
validation_data (Tuple): Validation data in the format of a list of tuples [(X, y)]
if user desires to use early stopping
early_stopping_rounds (Int): Activates early stopping.
Validation metric needs to improve at least once
in every **early_stopping_rounds** round(s) to continue training.
See xgboost.train documentation.
verbose_eval ([Bool, Int]): Level of verbosity. See xgboost.train documentation.
persist_train (Bool): Whether or not to persist training data to use explainability
through prototypes
index_id (pd.Index): User defined index if intended to use explainability
through prototypes
time_bins (np.array): Specified time windows to use when making survival predictions
Returns:
XGBSEKaplanNeighbors: Fitted instance of XGBSEKaplanNeighbors
"""
self.E_train, self.T_train = convert_y(y)
if time_bins is None:
time_bins = get_time_bins(self.T_train, self.E_train)
self.time_bins = time_bins
# converting data to xgb format
dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])
# converting validation data to xgb format
evals = ()
if validation_data:
X_val, y_val = validation_data
dvalid = convert_data_to_xgb_format(
X_val, y_val, self.xgb_params["objective"]
)
evals = [(dvalid, "validation")]
# training XGB
self.bst = xgb.train(
self.xgb_params,
dtrain,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
evals=evals,
verbose_eval=verbose_eval,
)
self.feature_importances_ = self.bst.get_score()
# creating nearest neighbor index
leaves = self.bst.predict(
dtrain, pred_leaf=True, iteration_range=(0, self.bst.best_iteration)
)
self.tree = BallTree(leaves, metric="hamming", leaf_size=40)
if persist_train:
self.persist_train = True
if index_id is None:
index_id = X.index.copy()
self.index_id = index_id
return self
def predict(
self,
X,
time_bins=None,
return_ci=False,
ci_width=0.683,
return_interval_probs=False,
):
"""
Make queries to nearest neighbor search index build on the transformed XGBoost space.
Compute a Kaplan-Meier estimator for each neighbor-set. Predict the KM estimators.
Args:
X (pd.DataFrame): Dataframe with samples to generate predictions
time_bins (np.array): Specified time windows to use when making survival predictions
return_ci (Bool): Whether to return confidence intervals via the Exponential Greenwood formula
ci_width (Float): Width of confidence interval
return_interval_probs (Bool): Boolean indicating if interval probabilities are
supposed to be returned. If False the cumulative survival is returned.
Returns:
(pd.DataFrame): A dataframe of survival probabilities
for all times (columns), from a time_bins array, for all samples of X
(rows). If return_interval_probs is True, the interval probabilities are returned
instead of the cumulative survival probabilities.
upper_ci (np.array): Upper confidence interval for the survival
probability values
lower_ci (np.array): Lower confidence interval for the survival
probability values
"""
# converting to xgb format
d_matrix = xgb.DMatrix(X)
# getting leaves and extracting neighbors
leaves = self.bst.predict(
d_matrix, pred_leaf=True, iteration_range=(0, self.bst.best_iteration)
)
if self.radius:
assert self.radius > 0, "Radius must be positive"
neighs, _ = self.tree.query_radius(
leaves, r=self.radius, return_distance=True
)
number_of_neighbors = np.array([len(neigh) for neigh in neighs])
if np.argwhere(number_of_neighbors == 1).shape[0] > 0:
# If there is at least one sample without neighbors apart from itself
# a warning is raised suggesting a radius increase
warnings.warn(
"Warning: Some samples don't have neighbors apart from itself. Increase the radius",
RuntimeWarning,
)
else:
_, neighs = self.tree.query(leaves, k=self.n_neighbors)
# gathering times and events/censors for neighbor sets
T_neighs = self.T_train[neighs]
E_neighs = self.E_train[neighs]
# vectorized (very fast!) implementation of Kaplan Meier curves
if time_bins is None:
time_bins = self.time_bins
# calculating z-score from width
z = st.norm.ppf(0.5 + ci_width / 2)
preds_df, upper_ci, lower_ci = calculate_kaplan_vectorized(
T_neighs, E_neighs, time_bins, z
)
if return_ci and return_interval_probs:
raise ValueError(
"Confidence intervals for interval probabilities is not supported. Choose between return_ci and return_interval_probs."
)
if return_interval_probs:
preds_df = calculate_interval_failures(preds_df)
return preds_df
if return_ci:
return preds_df, upper_ci, lower_ci
return preds_df
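# Illustrative usage sketch (shapes and the `convert_to_structured` helper from
# xgbse.converters are assumptions for the example, not part of this module):
#   >>> X = pd.DataFrame(np.random.rand(200, 5))
#   >>> from xgbse.converters import convert_to_structured
#   >>> y = convert_to_structured(np.random.exponential(10, 200),
#   ...                           np.random.binomial(1, 0.7, 200))
#   >>> model = XGBSEKaplanNeighbors(n_neighbors=20)
#   >>> model.fit(X, y)
#   >>> surv = model.predict(X)                       # survival curves per row
#   >>> surv, upper, lower = model.predict(X, return_ci=True)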
def _align_leaf_target(neighs, target):
# getting times and events for each leaf element
target_neighs = neighs.apply(lambda x: target[x])
# converting to vectorized kaplan format
# filling nas due to different leaf sizes with 0
target_neighs = (
pd.concat([pd.DataFrame(e) for e in target_neighs.values], axis=1)
.T.fillna(0)
.values
)
return target_neighs
# class to turn XGB into a kNN with a kaplan meier in the NNs
class XGBSEKaplanTree(XGBSEBaseEstimator):
"""
Single tree implementation as a simplification to `XGBSEKaplanNeighbors`.
Instead of doing nearest neighbor searches, fits a single tree via `xgboost`
and calculates KM curves at each of its leaves.
!!! Note
* It is by far the most efficient implementation, able to scale to millions of examples easily.
At fit time, the tree is built and all KM curves are pre-calculated,
so that at scoring time a simple query will suffice to get the model's estimates.
Read more in [How XGBSE works](https://loft-br.github.io/xgboost-survival-embeddings/how_xgbse_works.html).
"""
def __init__(
self,
xgb_params=None,
):
"""
Args:
xgb_params (Dict): Parameters for XGBoost model.
If not passed, the following default parameters will be used:
```
DEFAULT_PARAMS_TREE = {
"objective": "survival:cox",
"eval_metric": "cox-nloglik",
"tree_method": "exact",
"max_depth": 100,
"booster": "dart",
"subsample": 1.0,
"min_child_weight": 30,
"colsample_bynode": 1.0,
}
```
Check <https://xgboost.readthedocs.io/en/latest/parameter.html> for more options.
"""
if xgb_params is None:
xgb_params = DEFAULT_PARAMS_TREE
self.xgb_params = xgb_params
self.persist_train = False
self.index_id = None
self.feature_importances_ = None
def fit(
self,
X,
y,
persist_train=True,
index_id=None,
time_bins=None,
ci_width=0.683,
**xgb_kwargs,
):
"""
Fit a single decision tree using xgboost. For each leaf in the tree,
build a Kaplan-Meier estimator.
!!! Note
* Differently from `XGBSEKaplanNeighbors`, in `XGBSEKaplanTree`, the width of
the confidence interval (`ci_width`) must be specified at fit time.
Args:
X ([pd.DataFrame, np.array]): Design matrix to fit XGBoost model
y (structured array(numpy.bool_, numpy.number)): Binary event indicator as first field,
and time of event or time of censoring as second field.
persist_train (Bool): Whether or not to persist training data to use explainability
through prototypes
index_id (pd.Index): User defined index if intended to use explainability
through prototypes
time_bins (np.array): Specified time windows to use when making survival predictions
ci_width (Float): Width of confidence interval
Returns:
XGBSEKaplanTree: Trained instance of XGBSEKaplanTree
"""
E_train, T_train = convert_y(y)
if time_bins is None:
time_bins = get_time_bins(T_train, E_train)
self.time_bins = time_bins
# converting data to xgb format
dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])
# training XGB
self.bst = xgb.train(self.xgb_params, dtrain, num_boost_round=1, **xgb_kwargs)
self.feature_importances_ = self.bst.get_score()
# getting leaves
leaves = self.bst.predict(
dtrain, pred_leaf=True, iteration_range=(0, self.bst.best_iteration)
)
# organizing elements per leaf
leaf_neighs = (
| pd.DataFrame({"leaf": leaves}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
        self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
                         pd.Timestamp('2011-01-04', tz=tz)])
'''
Assorted functions used in the machine learning codes during the Blue Stars project
Created by: <NAME> (<EMAIL>)
'''
#External packages and functions used
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import RepeatedKFold
from sklearn.pipeline import Pipeline
from sklearn.metrics import (mean_absolute_error, r2_score, max_error, mean_squared_error)
from itertools import combinations
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
import time
#Sets of filters and corrections used
Filters = {'JPLUS': ['uJAVA', 'J0378', 'J0395', 'J0410', 'J0430', 'gSDSS',
'J0515', 'rSDSS', 'J0660', 'iSDSS', 'J0861', 'zSDSS'],
'WISE': ['W1', 'W2', 'W3', 'W4'],
'GALEX': ['NUVmag'],
'GAIA': ['G', 'BP', 'RP']
}
Corrections = {'JPLUS': [('Ax_' + Filter) for Filter in Filters['JPLUS']]}
def MagnitudeCorrection(df, FilterSet, CorrectionSet, NewDF):
'''
Correct the magnitudes of a set of filters inside a dataframe
Keyword Arguments:
df - Dataframe with uncorrected magnitudes
FilterSet - Set of filters to correct
CorrectionSet - Set of corrections
NewDF - If True, a new dataframe is returned with just the corrected values;
If False, the function returns the complete original dataframe with the uncorrected values replaced by the corrected ones.
'''
if NewDF == True:
TempDF = pd.DataFrame()
for Index in range(0, len(FilterSet)):
TempDF[FilterSet[Index]] = df[FilterSet[Index]] - df[CorrectionSet[Index]]
return TempDF
else:
for Index in range(0, len(FilterSet)):
df[FilterSet[Index]] = df[FilterSet[Index]] - df[CorrectionSet[Index]]
return df
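# Illustrative example (hypothetical single-filter dataframe that follows the
# Filters/Corrections naming convention defined above):
#   >>> df = pd.DataFrame({'uJAVA': [18.20], 'Ax_uJAVA': [0.05]})
#   >>> MagnitudeCorrection(df, ['uJAVA'], ['Ax_uJAVA'], NewDF=True)
#      uJAVA
#   0  18.15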
def CreateColors(df, FilterSet, NewDF):
'''
Create all the possible filter combinations (colors) for a set of filters inside a dataframe
Keyword arguments:
df - Dataframe with the magnitudes
FilterSet - Set of filters to combine
NewDF - If True, a new dataframe is returned with just the combined values;
If False, the function returns the complete original dataframe with the combinations added.
'''
CombinationsList = list(combinations(FilterSet, 2))
if NewDF == True:
TempDF = pd.DataFrame()
for Combination in CombinationsList:
CombinationName = '(' + Combination[0] + ' - ' + Combination[1] + ')'
TempDF[CombinationName] = (df[Combination[0]] - df[Combination[1]])
return TempDF
else:
for Combination in CombinationsList:
CombinationName = '(' + Combination[0] + ' - ' + Combination[1] + ')'
df[CombinationName] = (df[Combination[0]] - df[Combination[1]])
return df
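# Illustrative example: for three filters the function builds the three
# pairwise colors (g - r), (g - i) and (r - i).
#   >>> df = pd.DataFrame({'gSDSS': [15.0], 'rSDSS': [14.5], 'iSDSS': [14.0]})
#   >>> CreateColors(df, ['gSDSS', 'rSDSS', 'iSDSS'], NewDF=True)
#      (gSDSS - rSDSS)  (gSDSS - iSDSS)  (rSDSS - iSDSS)
#   0              0.5              1.0              0.5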
def CreateCombinations(df, FilterSet, NewDF):
'''
Create all the possible color combinations for a set of filters inside a dataframe
Keyword arguments:
df - Dataframe with the magnitudes
FilterSet - Set of filters to combine
NewDF - If True, a new dataframe is returned with just the combined values;
If False, the function returns the complete original dataframe with the combinations added.
'''
CombinationsList = list(combinations(FilterSet, 4))
if NewDF == True:
        TempDF = pd.DataFrame()
"""
Provides rolling statistical moments and related descriptive
statistics implemented in Cython
"""
from __future__ import division
from functools import wraps
from numpy import NaN
import numpy as np
from pandas.core.api import DataFrame, Series, Panel, notnull
import pandas.algos as algos
import pandas.core.common as com
from pandas.core.common import _values_from_object
from pandas.util.decorators import Substitution, Appender
__all__ = ['rolling_count', 'rolling_max', 'rolling_min',
'rolling_sum', 'rolling_mean', 'rolling_std', 'rolling_cov',
'rolling_corr', 'rolling_var', 'rolling_skew', 'rolling_kurt',
'rolling_quantile', 'rolling_median', 'rolling_apply',
'rolling_corr_pairwise', 'rolling_window',
'ewma', 'ewmvar', 'ewmstd', 'ewmvol', 'ewmcorr', 'ewmcov',
'expanding_count', 'expanding_max', 'expanding_min',
'expanding_sum', 'expanding_mean', 'expanding_std',
'expanding_cov', 'expanding_corr', 'expanding_var',
'expanding_skew', 'expanding_kurt', 'expanding_quantile',
'expanding_median', 'expanding_apply', 'expanding_corr_pairwise']
#------------------------------------------------------------------------------
# Docs
_doc_template = """
%s
Parameters
----------
%s
window : Number of observations used for calculating statistic
min_periods : int
Minimum number of observations in window required to have a value
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
time_rule is a legacy alias for freq
Returns
-------
%s
"""
_ewm_doc = r"""%s
Parameters
----------
%s
com : float, optional
Center of mass: :math:`\alpha = 1 / (1 + com)`,
span : float, optional
Specify decay in terms of span, :math:`\alpha = 2 / (span + 1)`
halflife : float, optional
    Specify decay in terms of halflife, :math:`\alpha = 1 - \exp(\log(0.5) / halflife)`
min_periods : int, default 0
Number of observations in sample to require (only affects
beginning)
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
time_rule is a legacy alias for freq
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account for
imbalance in relative weightings (viewing EWMA as a moving average)
%s
Notes
-----
Either center of mass or span must be specified
EWMA is sometimes specified using a "span" parameter s, and we have that the
decay parameter :math:`\alpha` is related to the span as
:math:`\alpha = 2 / (s + 1) = 1 / (1 + c)`
where c is the center of mass. Given a span, the associated center of mass is
:math:`c = (s - 1) / 2`
So a "20-day EWMA" would have center 9.5.
Returns
-------
y : type of input argument
"""
_expanding_doc = """
%s
Parameters
----------
%s
min_periods : int
Minimum number of observations in window required to have a value
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
Returns
-------
%s
"""
_type_of_input = "y : type of input argument"
_flex_retval = """y : type depends on inputs
DataFrame / DataFrame -> DataFrame (matches on columns)
DataFrame / Series -> Computes result for each column
Series / Series -> Series"""
_unary_arg = "arg : Series, DataFrame"
_binary_arg_flex = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray"""
_binary_arg = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray"""
_bias_doc = r"""bias : boolean, default False
Use a standard estimation bias correction
"""
def rolling_count(arg, window, freq=None, center=False, time_rule=None):
"""
Rolling count of number of non-NaN observations inside provided window.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : Number of observations used for calculating statistic
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
center : boolean, default False
Whether the label should correspond with center of window
time_rule : Legacy alias for freq
Returns
-------
rolling_count : type of caller
"""
arg = _conv_timerule(arg, freq, time_rule)
window = min(window, len(arg))
return_hook, values = _process_data_structure(arg, kill_inf=False)
converted = np.isfinite(values).astype(float)
result = rolling_sum(converted, window, min_periods=1,
center=center) # already converted
# putmask here?
result[np.isnan(result)] = 0
return return_hook(result)
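# Example (doctest-style sketch):
#   >>> s = Series([1.0, np.nan, 3.0, 4.0])
#   >>> rolling_count(s, window=2).tolist()
#   [1.0, 1.0, 1.0, 2.0]
# The first value comes from a partial window, since the count is computed
# with min_periods=1 internally.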
@Substitution("Unbiased moving covariance", _binary_arg_flex, _flex_retval)
@Appender(_doc_template)
def rolling_cov(arg1, arg2, window, min_periods=None, freq=None,
center=False, time_rule=None):
arg1 = _conv_timerule(arg1, freq, time_rule)
arg2 = _conv_timerule(arg2, freq, time_rule)
window = min(window, len(arg1), len(arg2))
def _get_cov(X, Y):
mean = lambda x: rolling_mean(x, window, min_periods,center=center)
count = rolling_count(X + Y, window,center=center)
bias_adj = count / (count - 1)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
rs = _flex_binary_moment(arg1, arg2, _get_cov)
return rs
@Substitution("Moving sample correlation", _binary_arg_flex, _flex_retval)
@Appender(_doc_template)
def rolling_corr(arg1, arg2, window, min_periods=None, freq=None,
center=False, time_rule=None):
def _get_corr(a, b):
num = rolling_cov(a, b, window, min_periods, freq=freq,
center=center, time_rule=time_rule)
den = (rolling_std(a, window, min_periods, freq=freq,
center=center, time_rule=time_rule) *
rolling_std(b, window, min_periods, freq=freq,
center=center, time_rule=time_rule))
return num / den
return _flex_binary_moment(arg1, arg2, _get_corr)
def _flex_binary_moment(arg1, arg2, f):
if not (isinstance(arg1,(np.ndarray, Series, DataFrame)) and
isinstance(arg2,(np.ndarray, Series, DataFrame))):
raise TypeError("arguments to moment function must be of type "
"np.ndarray/Series/DataFrame")
if isinstance(arg1, (np.ndarray,Series)) and isinstance(arg2, (np.ndarray,Series)):
X, Y = _prep_binary(arg1, arg2)
return f(X, Y)
elif isinstance(arg1, DataFrame):
results = {}
if isinstance(arg2, DataFrame):
X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
else:
res_columns = arg1.columns
X, Y = arg1.align(arg2, axis=0, join='outer')
results = {}
for col in res_columns:
results[col] = f(X[col], Y)
return DataFrame(results, index=X.index, columns=res_columns)
else:
return _flex_binary_moment(arg2, arg1, f)
def rolling_corr_pairwise(df, window, min_periods=None):
"""
Computes pairwise rolling correlation matrices as Panel whose items are
dates
Parameters
----------
df : DataFrame
window : int
min_periods : int, default None
Returns
-------
correls : Panel
"""
from pandas import Panel
from collections import defaultdict
all_results = defaultdict(dict)
for i, k1 in enumerate(df.columns):
for k2 in df.columns[i:]:
corr = rolling_corr(df[k1], df[k2], window,
min_periods=min_periods)
all_results[k1][k2] = corr
all_results[k2][k1] = corr
return Panel.from_dict(all_results).swapaxes('items', 'major')
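# Example (doctest-style sketch): after the axis swap the Panel's items axis
# holds the index labels of `df`, so indexing by a label yields that label's
# pairwise correlation matrix.
#   >>> df = DataFrame(np.random.randn(100, 3), columns=['A', 'B', 'C'])
#   >>> correls = rolling_corr_pairwise(df, window=20)
#   >>> correls[df.index[-1]]   # 3x3 correlation matrix at the last label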
def _rolling_moment(arg, window, func, minp, axis=0, freq=None,
center=False, time_rule=None, **kwargs):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : Number of observations used for calculating statistic
func : Cython function to compute rolling statistic on raw series
minp : int
Minimum number of observations required to have a value
axis : int, default 0
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
center : boolean, default False
Whether the label should correspond with center of window
time_rule : Legacy alias for freq
Returns
-------
y : type of input
"""
arg = _conv_timerule(arg, freq, time_rule)
calc = lambda x: func(x, window, minp=minp, **kwargs)
return_hook, values = _process_data_structure(arg)
# actually calculate the moment. Faster way to do this?
if values.ndim > 1:
result = np.apply_along_axis(calc, axis, values)
else:
result = calc(values)
rs = return_hook(result)
if center:
rs = _center_window(rs, window, axis)
return rs
def _center_window(rs, window, axis):
if axis > rs.ndim-1:
raise ValueError("Requested axis is larger then no. of argument dimensions")
offset = int((window - 1) / 2.)
if isinstance(rs, (Series, DataFrame, Panel)):
rs = rs.shift(-offset, axis=axis)
else:
rs_indexer = [slice(None)] * rs.ndim
rs_indexer[axis] = slice(None, -offset)
lead_indexer = [slice(None)] * rs.ndim
lead_indexer[axis] = slice(offset, None)
na_indexer = [slice(None)] * rs.ndim
na_indexer[axis] = slice(-offset, None)
rs[tuple(rs_indexer)] = np.copy(rs[tuple(lead_indexer)])
rs[tuple(na_indexer)] = np.nan
return rs
def _process_data_structure(arg, kill_inf=True):
if isinstance(arg, DataFrame):
return_hook = lambda v: type(arg)(v, index=arg.index,
columns=arg.columns)
values = arg.values
elif isinstance(arg, Series):
values = arg.values
return_hook = lambda v: Series(v, arg.index)
else:
return_hook = lambda v: v
values = arg
if not issubclass(values.dtype.type, float):
values = values.astype(float)
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return return_hook, values
#------------------------------------------------------------------------------
# Exponential moving moments
def _get_center_of_mass(com, span, halflife):
valid_count = len([x for x in [com, span, halflife] if x is not None])
if valid_count > 1:
raise Exception("com, span, and halflife are mutually exclusive")
if span is not None:
# convert span to center of mass
com = (span - 1) / 2.
elif halflife is not None:
# convert halflife to center of mass
decay = 1 - np.exp(np.log(0.5) / halflife)
com = 1 / decay - 1
elif com is None:
raise Exception("Must pass one of com, span, or halflife")
return float(com)
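# Worked example of the conversions above: span=20 gives a center of mass of
# (20 - 1) / 2 = 9.5 (i.e. alpha = 2 / 21); halflife=10 gives
# alpha = 1 - exp(log(0.5) / 10) ~= 0.067, i.e. a center of mass of ~13.9.
#   >>> _get_center_of_mass(com=None, span=20, halflife=None)
#   9.5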
@Substitution("Exponentially-weighted moving average", _unary_arg, "")
@Appender(_ewm_doc)
def ewma(arg, com=None, span=None, halflife=None, min_periods=0, freq=None, time_rule=None,
adjust=True):
com = _get_center_of_mass(com, span, halflife)
arg = _conv_timerule(arg, freq, time_rule)
def _ewma(v):
result = algos.ewma(v, com, int(adjust))
first_index = _first_valid_index(v)
result[first_index: first_index + min_periods] = NaN
return result
return_hook, values = _process_data_structure(arg)
output = np.apply_along_axis(_ewma, 0, values)
return return_hook(output)
def _first_valid_index(arr):
# argmax scans from left
return notnull(arr).argmax() if len(arr) else 0
@ | Substitution("Exponentially-weighted moving variance", _unary_arg, _bias_doc) | pandas.util.decorators.Substitution |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import re
import sys
import tempfile
from collections import namedtuple
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from .. import tensor as mt
from .. import dataframe as md
from .. import remote as mr
from ..config import option_context
from ..deploy.utils import load_service_config_file
from ..session import execute, fetch, fetch_log
test_namedtuple_type = namedtuple("TestNamedTuple", "a b")
@pytest.fixture
def setup():
from ..deploy.oscar.tests.session import new_test_session
sess = new_test_session(address="127.0.0.1", init_local=True, default=True)
with option_context({"show_progress": False}):
try:
from .. import __version__ as mars_version
assert sess.get_cluster_versions() == [mars_version]
yield sess
finally:
sess.stop_server()
def test_session_async_execute(setup):
raw_a = np.random.RandomState(0).rand(10, 20)
a = mt.tensor(raw_a)
expected = raw_a.sum()
res = a.sum().to_numpy(wait=False).result()
assert expected == res
res = a.sum().execute(wait=False)
res = res.result().fetch()
assert expected == res
raw_df = pd.DataFrame(raw_a)
expected = raw_df.skew()
df = md.DataFrame(a)
res = df.skew().to_pandas(wait=False).result()
pd.testing.assert_series_equal(expected, res)
res = df.skew().execute(wait=False)
res = res.result().fetch()
pd.testing.assert_series_equal(expected, res)
t = [df.sum(), a.sum()]
res = mt.ExecutableTuple(t).to_object(wait=False).result()
pd.testing.assert_series_equal(raw_df.sum(), res[0])
assert raw_a.sum() == res[1]
res = mt.ExecutableTuple(t).execute(wait=False)
res = fetch(*res.result())
pd.testing.assert_series_equal(raw_df.sum(), res[0])
assert raw_a.sum() == res[1]
def test_executable_tuple_execute(setup):
raw_a = np.random.RandomState(0).rand(10, 20)
a = mt.tensor(raw_a)
    raw_df = pd.DataFrame(raw_a)
import psutil
import pandas as pd
from datetime import datetime
from computerMetricCollector.metricsCollector import Collector
class ProcessMetrics(Collector):
def __init__(self, logger, machine_id, metrics, metrics_to_encrypt, datetime_format, url):
self.is_stored = False
self.is_stored_locally = False
self.logger = logger
self.machine_id = machine_id
self.metrics_to_encrypt = metrics_to_encrypt
self.datetime_format = datetime_format
self.metrics_df = pd.DataFrame(columns=metrics)
self.remote_url = url
def fetch_metrics(self):
"""
        This function fetches the metrics to be stored in the database
:return:
"""
self.logger.info("Start fetching for process metrics")
pid_arr = psutil.pids()
for pid in pid_arr:
try:
self.logger.debug("Fetching process metrics for process pid " + str(pid))
process = psutil.Process(pid)
self.logger.debug("Fetching process metrics for process name " + process.name())
p_memory_info = process.memory_info()
cpu_times = process.cpu_times()
cpu_collect_int = 0.1
# non_private_mem = 0
p_io_info = process.io_counters()
metrics_rec = {
"machine_id": self.machine_id,
"entry_time": datetime.now().strftime(self.datetime_format),
"pid": process.pid,
"name": process.name(),
"start_time": process.create_time(),
"start_user": process.username(),
"process_status": process.status(),
"cpu_user_time": cpu_times.user,
"cpu_kernel_time": cpu_times.system,
"cpu_percent": process.cpu_percent(cpu_collect_int),
"memory_percent_used_byte": process.memory_percent(),
"memory_physical_used_byte": p_memory_info.rss,
"memory_virtual_used_byte": p_memory_info.vms,
"memory_unique_used_byte": p_memory_info.private,
"memory_page_fault": p_memory_info.num_page_faults,
"io_read_count": p_io_info.read_count,
"io_read_bytes": p_io_info.read_bytes,
"io_write_count": p_io_info.write_count,
"io_write_bytes": p_io_info.write_bytes,
"thread_num": process.num_threads()
}
self.metrics_df = self.metrics_df.append(metrics_rec, ignore_index=True)
self.metrics_df = self.metrics_df.reset_index(drop=True)
except psutil.AccessDenied as ad:
self.logger.warning("Access denied to fetch process metrics for pid {}".format(str(pid)))
self.logger.warning(ad)
except psutil.NoSuchProcess as nsp:
self.logger.warning("No process found for pid {}".format(str(pid)))
self.logger.warning(nsp)
except Exception as e:
self.logger.error(e)
self.logger.info("End fetching for process metrics")
def get_metrics_df(self):
"""
        This function returns the metrics data frame held by the collector instance
        :return: metrics data frame created by the fetch_metrics function
        """
        self.logger.info("Get metrics dataframe for process metrics")
return self.metrics_df
def reset_metrics_df(self):
"""
        This function resets the metrics data frame and enables the instance to fetch again
:return:
"""
self.logger.info("Reset in memory dataframe for collector " + type(self).__name__)
        self.metrics_df = pd.DataFrame(columns=self.metrics_df.columns)
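# Illustrative usage sketch (the logger, machine id, metric column names and
# datetime format below are placeholder assumptions; real values come from the
# collector configuration):
#   >>> import logging
#   >>> logger = logging.getLogger(__name__)
#   >>> cols = ["machine_id", "entry_time", "pid", "name", "cpu_percent"]
#   >>> collector = ProcessMetrics(logger, "machine-01", cols, [],
#   ...                            "%Y-%m-%d %H:%M:%S", url=None)
#   >>> collector.fetch_metrics()
#   >>> df = collector.get_metrics_df()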
from __future__ import division
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from pandas._libs import testing as _testing
import pandas.compat as compat
from pandas.compat import (
PY2, PY3, Counter, StringIO, callable, filter, httplib, lmap, lrange, lzip,
map, raise_with_traceback, range, string_types, u, unichr, zip)
from pandas.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Series,
bdate_range)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import pandas.core.common as com
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
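# Example (doctest-style sketch):
#   >>> df = DataFrame({"a": [1, 2, 3]})
#   >>> result = round_trip_pickle(df)
#   >>> assert_frame_equal(df, result)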
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
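# Example (doctest-style sketch of the precision behaviour described above):
#   >>> assert_almost_equal(1.000001, 1.000002, check_less_precise=True)
#   >>> assert_almost_equal(np.array([1.0, 2.0]),
#   ...                     np.array([1.0, 2.0 + 1e-7]))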
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
            locale.Error): # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
@contextmanager
def set_defaultencoding(encoding):
"""
Set default encoding (as given by sys.getdefaultencoding()) to the given
encoding; restore on exit.
Parameters
----------
encoding : str
"""
if not PY2:
raise ValueError("set_defaultencoding context is only available "
"in Python 2.")
orig = sys.getdefaultencoding()
reload(sys) # noqa:F821
sys.setdefaultencoding(encoding)
try:
yield
finally:
sys.setdefaultencoding(orig)
def capture_stdout(f):
r"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
r"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
    ...     assert out == "foo"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
    AssertionError: assert 'foo' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix='')
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except Exception:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.codes[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or | is_categorical_dtype(right) | pandas.core.dtypes.common.is_categorical_dtype |
import cv2
import numpy as np
import kNN
import pandas
global img, flag
global point1, point2
global points
def on_mouse(event, x, y, flags, param):
global img, point1, point2, flag
img2 = img.copy()
    if event == cv2.EVENT_LBUTTONDOWN:  # left mouse button pressed
point1 = (x,y)
cv2.circle(img2, point1, 10, (0,255,0), 5)
cv2.imshow('image', img2)
    elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON):  # dragging with the left button held down
cv2.rectangle(img2, point1, (x,y), (255,0,0), 5)
cv2.imshow('image', img2)
    elif event == cv2.EVENT_LBUTTONUP:  # left mouse button released
point2 = (x,y)
cv2.rectangle(img2, point1, point2, (0,0,255), 5)
cv2.imshow('image', img2)
min_x = min(point1[0],point2[0])
min_y = min(point1[1],point2[1])
width = abs(point1[0] - point2[0])
height = abs(point1[1] -point2[1])
cut_img = img[min_y:min_y+height, min_x:min_x+width]
cv2.imwrite('aftercut.jpg', cut_img)
flag=1
def collectBGR():
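    """Sample the BGR value at the centre of each cell of the 12x8 grid in the cropped image."""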
global points,flag
img = cv2.imread("aftercut.jpg", 1)
x_step = img.shape[1] / 8
y_step = img.shape[0] / 12
x_cr = x_step / 2
y_cr = y_step / 2
points = np.zeros((12, 8, 3), dtype=int)
count = 0
for i in range(1, 13):
for j in range(1, 9):
points[i - 1, j - 1] = (img[int(round(y_cr)), int(round(x_cr))])
count += 1
x_cr = x_cr + x_step
y_cr = y_cr + y_step
x_cr = x_step / 2
flag = 2
# print(points)
def calcandsave():
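    """Classify each sampled BGR value with the kNN model and collect the 12x8 results into a DataFrame."""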
global points
dataSet, labels = kNN.loadDataSet()
result = np.zeros((12,8))
for i in range(12):
for j in range(8):
result[i,j] = kNN.classify0(points[i,j],np.array(dataSet),labels)
df = | pandas.DataFrame(result) | pandas.DataFrame |
#!/usr/bin/env python
# Description of the module
''' Classifying mnist using keras + CNN
This module implements mnist classification
using Keras as described in the kernel at
https://www.kaggle.com/yassineghouzam/introduction-to-cnn-keras-0-997-top-6/code
'''
# Imports
## In-built modules
## Third-party modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
## Custom modules
from commonconstants import MNIST_KAGGLE_TRAIN, MNIST_KAGGLE_TEST
from data_helper import plot_confusion_matrix, print_dataset_details
# Global initialisations
np.random.seed(2)
sns.set(style='white', context='notebook', palette='deep')
reduce_size = False
# Data preparation
## Load data
train = pd.read_csv(MNIST_KAGGLE_TRAIN)
test = pd.read_csv(MNIST_KAGGLE_TEST)
test_size = 28000
# Reduce data size for prototype
if reduce_size:
train = train.head(1000)
test = test.head(100)
test_size = 100
Y_train = train.label
X_train = train.drop(labels = ["label"], axis = 1)
del train
g = sns.countplot(Y_train)
print(Y_train.value_counts())
## Check for null and missing values
print(X_train.isnull().any().describe())
print(test.isnull().any().describe())
## Normalization
X_train = X_train / 255.0
test = test / 255.0
## Reshape
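# Conv2D layers expect 4-D input of shape (samples, height, width, channels); MNIST digits are 28x28 grayscale.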
X_train = X_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
## Label encoding
Y_train = to_categorical(Y_train, num_classes=10)
## Split training and validation set
random_seed = 2
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.2, random_state = random_seed)
# Modelling
## Define the model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size = (5,5),padding = 'Same', activation ='relu', input_shape = (28,28,1)))
model.add(Conv2D(filters=32, kernel_size = (5,5), padding = 'Same', activation = 'relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size = (3,3), padding = 'Same', activation = 'relu'))
model.add(Conv2D(filters=64, kernel_size = (3,3), padding = 'Same', activation = 'relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(units=256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=10, activation='softmax'))
## Set the optimizer and annealer
optimiser = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer= optimiser, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience = 3, verbose=1, min_lr = 0.00001)
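# Halve the learning rate whenever validation accuracy has not improved for 3 epochs (floor at 1e-5).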
epochs = 2
batch_size = 64
## Data augmentation
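# Random rotations, zooms and shifts generate extra training samples on the fly; flips stay disabled
# because mirrored or inverted digits (such as 6 and 9) would no longer match their labels.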
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False,featurewise_std_normalization=False,samplewise_std_normalization=False,
zca_whitening=False,rotation_range=10, zoom_range=0.1, width_shift_range=0.1, horizontal_flip=False, height_shift_range=0.1,
vertical_flip=False)
datagen.fit(X_train)
# Evaluate the model
# history = model.fit(X_train, Y_train, batch_size=batch_size, epochs= epochs,
# verbose = 2, validation_data=(X_val,Y_val))
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size = batch_size), epochs= epochs,validation_data=(X_val,Y_val),verbose=2,
steps_per_epoch= X_train.shape[0] // batch_size, callbacks=[learning_rate_reduction])
## Training and validation curves
fig, ax = plt.subplots(2,1)
ax[0].plot(history.history['loss'], color='b', label='Training loss')
ax[0].plot(history.history['val_loss'], color='r', label='validation loss', axes= ax[0])
legend = ax[0].legend(loc='best', shadow = True)
ax[1].plot(history.history['acc'], color='b', label='Training accuracy')
ax[1].plot(history.history['val_acc'], color='r', label='Validation accuracy')
legend = ax[1].legend(loc='best', shadow = True)
plt.show()
## Confusion matrix
Y_pred = model.predict(X_val)
Y_pred_classes = np.argmax(Y_pred, axis=1)
Y_true = np.argmax(Y_val, axis=1)
confusion_mtx = confusion_matrix(Y_true,Y_pred_classes)
plot_confusion_matrix(confusion_mtx, classes = range(10))
errors = (Y_pred_classes - Y_true !=0)
Y_pred_classes_errors = Y_pred_classes[errors]
Y_pred_errors = Y_pred[errors]
Y_true_errors = Y_true[errors]
X_val_errors = X_val[errors]
def display_errors(errors_index, img_errors, pred_errors, obs_errors):
""" This function shows 6 images with their predicted and real labels"""
n = 0
nrows = 2
ncols = 3
fig, ax = plt.subplots(nrows, ncols,sharex=True, sharey= True)
for row in range(nrows):
for col in range(ncols):
error = errors_index[n]
ax[row,col].imshow(img_errors[error].reshape(28,28))
ax[row,col].set_title('Predicted : {}\n True : {}'.format(pred_errors[error], obs_errors[error]))
n +=1
# Probabilities of the wrong predicted numbers
Y_pred_errors_prob = np.max(Y_pred_errors,axis = 1)
# Predicted probabilities of the true values in the error set
true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))
# Difference between the probability of the predicted label and the true label
delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors
# Sorted list of the delta prob errors
sorted_dela_errors = np.argsort(delta_pred_true_errors)
# Top 6 errors
most_important_errors = sorted_dela_errors[-6:]
# Show the top 6 errors
display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)
plt.show()
# Prediction and submition
## Predict and Submit results
results = model.predict(test)
results = np.argmax(results, axis = 1)
results = | pd.Series(results, name='Label') | pandas.Series |
# Copyright (c) 2019, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file test the Renumbering features
import gc
import pandas as pd
import pytest
import cudf
from cugraph.structure.number_map import NumberMap
from cugraph.tests import utils
def test_renumber_ips():
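    # Round-trip check: IPv4 strings -> integers -> internal vertex ids -> back to the original integers.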
source_list = [
"192.168.1.1",
"172.16.17.32",
"192.168.127.12",
"172.16.17.32",
]
dest_list = [
"172.16.17.32",
"192.168.127.12",
"172.16.17.32",
"192.168.1.1",
]
pdf = pd.DataFrame({"source_list": source_list, "dest_list": dest_list})
gdf = cudf.from_pandas(pdf)
gdf["source_as_int"] = gdf["source_list"].str.ip2int()
gdf["dest_as_int"] = gdf["dest_list"].str.ip2int()
numbering = NumberMap()
numbering.from_series(gdf["source_as_int"], gdf["dest_as_int"])
src = numbering.to_internal_vertex_id(gdf["source_as_int"])
dst = numbering.to_internal_vertex_id(gdf["dest_as_int"])
check_src = numbering.from_internal_vertex_id(src)["0"]
check_dst = numbering.from_internal_vertex_id(dst)["0"]
assert check_src.equals(gdf["source_as_int"])
assert check_dst.equals(gdf["dest_as_int"])
def test_renumber_ips_cols():
source_list = [
"192.168.1.1",
"172.16.17.32",
"192.168.127.12",
"172.16.17.32",
]
dest_list = [
"172.16.17.32",
"192.168.127.12",
"172.16.17.32",
"192.168.1.1",
]
pdf = pd.DataFrame({"source_list": source_list, "dest_list": dest_list})
gdf = cudf.from_pandas(pdf)
gdf["source_as_int"] = gdf["source_list"].str.ip2int()
gdf["dest_as_int"] = gdf["dest_list"].str.ip2int()
numbering = NumberMap()
numbering.from_dataframe(gdf, ["source_as_int"], ["dest_as_int"])
src = numbering.to_internal_vertex_id(gdf["source_as_int"])
dst = numbering.to_internal_vertex_id(gdf["dest_as_int"])
check_src = numbering.from_internal_vertex_id(src)["0"]
check_dst = numbering.from_internal_vertex_id(dst)["0"]
assert check_src.equals(gdf["source_as_int"])
assert check_dst.equals(gdf["dest_as_int"])
@pytest.mark.skip(reason="temporarily dropped string support")
def test_renumber_ips_str_cols():
source_list = [
"192.168.1.1",
"172.16.17.32",
"192.168.127.12",
"172.16.17.32",
]
dest_list = [
"172.16.17.32",
"192.168.127.12",
"172.16.17.32",
"192.168.1.1",
]
pdf = pd.DataFrame({"source_list": source_list, "dest_list": dest_list})
gdf = cudf.from_pandas(pdf)
numbering = NumberMap()
numbering.from_dataframe(gdf, ["source_list"], ["dest_list"])
src = numbering.to_internal_vertex_id(gdf["source_list"])
dst = numbering.to_internal_vertex_id(gdf["dest_list"])
check_src = numbering.from_internal_vertex_id(src)["0"]
check_dst = numbering.from_internal_vertex_id(dst)["0"]
assert check_src.equals(gdf["source_list"])
assert check_dst.equals(gdf["dest_list"])
def test_renumber_negative():
source_list = [4, 6, 8, -20, 1]
dest_list = [1, 29, 35, 0, 77]
df = | pd.DataFrame({"source_list": source_list, "dest_list": dest_list}) | pandas.DataFrame |
# Multiple test libraries ----
from statsmodels.stats.multitest import multipletests
from statsmodels.stats.multicomp import (pairwise_tukeyhsd, MultiComparison)
# -----------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Plotly libraries ----
import plotly.express as px
import plotly.graph_objects as go
import statsmodels.api as sm
import plotly.io as pio
from plotly.subplots import make_subplots
# -----------------------------------------
# Statistics libraries ----
from statsmodels.graphics.gofplots import qqplot
from statsmodels.stats.anova import AnovaRM
from scipy import stats
from scipy.stats import (f_oneway, friedmanchisquare, kruskal, levene,
mannwhitneyu, wilcoxon)
# -----------------------------------------
class InferencialStats:
"""
Class which analyze and test one or multiple measures
Attributes
----------
measures : List of measures lists.
alpha : Significance level. By default this value is 0.05
but it can be a value assigned by user.
is_paired : Parameter that indicates if measures are paired or not.
mean : Optional value for single measure case to carry out statistic test.
Returns
-------
InferencialStats object : InferencialStats object to analyze and test
    the measure or measures instantiated
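    Examples
    --------
    A minimal usage sketch (the values are illustrative only):
    >>> inf_stats = InferencialStats([[1.2, 1.4, 1.1], [1.3, 1.6, 1.2]],
    ...                              alpha=0.05, is_paired=True)
    >>> results = inf_stats.inferencial_statistics()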
"""
res_print = "Stadistical features:\nStats ::> {}\nP-value ::> {}"
test_res = "Stadistic Tests"
pvalue_string = "P value"
stats_string = "Stats"
common_stadistics_string = "Common Stadistics"
normal_string = "Normal Distribution"
data_string = "Data {}"
levene_string = "Levene Test"
shapiro_string = "Shapiro Norm"
def __init__(self, measures, alpha=0.05, is_paired=False, mean=None):
"""
        Create a new InferencialStats instance
Parameters
----------
measures: List of measures to instance a new InferencialStat object
alpha : Significance level. By default this value is 0.05
but it can be a value assigned by user.
is_paired : Parameter that indicates if measures are paired or not.
mean : Optional value for single measure case to carry out statistic test.
Returns
-------
        New InferencialStats object
"""
if type(measures) is not list:
print(
"Error: Type of argument[1].\nUse [1]measures: Must be list type")
quit()
if len(measures) == 0:
print(
"Error: Length of measures.\nUse [1]measures: Must be at least 1 or even more")
quit()
if type(is_paired) is not bool:
print(
"Error: Type of argument[2].\nUse [2]Pairing: Must be bool type")
quit()
if type(alpha) is not float:
print(
"Error: Type of argument[3].\nUse [3]alpha: Must be float type")
quit()
if mean is not None and (type(mean) != int and type(mean) != float):
print("Error:\"mean\" type is not correct. Must be \n-int\n-float\n-None")
self.measures = measures
self.alpha = alpha
self.is_paired = is_paired
self.mean = mean
self.dictionary_stats = {}
def inferencial_statistics(self):
"""
Statistical studies will be carried out for each sample
Parameters
----------
self.measures : list of measures lists.
Applies non-parametric tests to obtain stadistical features.
self.alpha : Significance level. By default this value is 0.05
but it can be a value assigned by user.
self.is_paired : Parameter that indicates if measures are paired or not.
self.mean : Optional value for single measure case to carry out statistic test.
Returns
-------
result : Dictionary
Stadistical features as the results of analyzing measures
"""
# not all(isinstance(l, list) for l in measures)
if len(self.measures) == 1:
self.single_stadistics(self.measures, self.alpha, self.mean)
else:
self.multiple_stadistics(self.measures, self.alpha, self.is_paired)
        # Bonferroni correction (call currently commented out)
# bonferroni_test(measures, alpha)
return self.dictionary_stats
def single_stadistics(self, measure, alpha, mean):
"""
        This method studies the received data with normal or non-normal statistical tests
Parameters
----------
measures :
Input data which will be studied.
alpha:
Significance level. By default this value is 0.05 but it can be a value assigned by user.
mean:
Optional value to apply stadistical test.
Returns
-------
Dictionary object with test results
"""
self.dictionary_stats[self.common_stadistics_string] = self.common_stadistics(
measure, alpha)
if mean is not None:
stats_1samp, pvalue_1samp = stats.ttest_1samp(
np.array(measure[0]), mean)
self.dictionary_stats["T test"] = {"stat": stats_1samp,
"p-value": pvalue_1samp}
def multiple_stadistics(self, measures, alpha, is_paired):
"""
        Case that analyzes multiple measures
        by carrying out multiple statistical tests
Parameters
----------
measures : list of measures lists.
Applies non-parametric tests to obtain stadistical features.
alpha : Significance level. By default this value is 0.05
but it can be a value assigned by user.
is_paired : Parameter that indicates if measures are paired or not.
Returns
-------
result : Dictionary
Stadistical features as the results of analyzing measures
"""
self.dictionary_stats[self.common_stadistics_string] = self.common_stadistics(
measures, alpha)
measure_len = len(measures[0])
self.dictionary_stats[self.levene_string] = self.homocedasticity(
measures, alpha)
if is_paired:
if any(len(lst) != measure_len for lst in measures):
print(
"Error: Data Length.\nUse isPaired: Each measure must have same length of data")
raise SystemExit()
if not self.check_normality():
print("Normality conditions are not met.\n")
self.non_parametric_test(measures, is_paired)
else:
print("Normality conditions are met.\n")
self.parametric_test(measures, is_paired)
else:
if not self.check_normality():
print("Normality conditions are not met.\n")
self.non_parametric_test(measures, is_paired)
else:
print("Normality conditions are met.\n")
self.parametric_test(measures, is_paired)
def common_stadistics(self, measures, alpha):
"""
        Generate multiple subplots showing a histogram for every data set stored in
        the first parameter.
        Check if measures from the first argument have a Normal or Non-Normal
        Distribution and store the results within a dictionary.
Parameters
----------
measures : list of data lists.
alpha : Signifance level that will be applied in every test.
Returns
-------
result: Dictionary with all stadistical tests evaluated to every data
"""
dictionary = {}
norm_list = self.dictionary_stats[self.shapiro_string] = []
for index, data in enumerate(measures):
data_test = np.array(data)
# histogram
fig_histogram = px.histogram(data_test, marginal="box")
fig_histogram['layout'].update({
'title': 'Histogram',
'showlegend': False,
'width': 800,
'height': 800,
})
# Qqplot
qqplot_data = qqplot(data_test, line='s').gca().lines
fig_qqplot = go.Figure()
fig_qqplot.add_trace({
'type': 'scatter',
'x': qqplot_data[0].get_xdata(),
'y': qqplot_data[0].get_ydata(),
'mode': 'markers',
'marker': {
'color': '#19d3f3'
}
})
fig_qqplot.add_trace({
'type': 'scatter',
'x': qqplot_data[1].get_xdata(),
'y': qqplot_data[1].get_ydata(),
'mode': 'lines',
'line': {
'color': '#636efa'
}
})
fig_qqplot['layout'].update({
'title': 'Quantile-Quantile Plot',
'xaxis': {
'title': 'Theoritical Quantities',
'zeroline': False
},
'yaxis': {
'title': 'Sample Quantities'
},
'showlegend': False,
'width': 800,
'height': 800,
})
print("Applying Shapiro-Wilk test in Data {}".format(index+1))
dic_shapiro = self.check_shapiro(data, alpha)
dictionary[self.data_string.format(index+1)] = {
'Histogram': fig_histogram.to_json(),
'Qqplot': fig_qqplot.to_json(),
'Shapiro Wilk': dic_shapiro
}
norm_list.append(dic_shapiro[self.normal_string])
return dictionary
def check_shapiro(self, measures, alpha):
"""
Apply Shapiro-Wilk test to input measures and analyze the result
comparing pvalue obtained against alpha
Parameters
----------
measures : list of data lists.
alpha : Signifance level that will be applied in every test.
Returns
-------
Dictionary with all stadistical tests evaluated to every data
If pvalue > alpha then Normal Distribution satisfied
"""
stat, p = stats.shapiro(measures)
if p > alpha:
res = True
else:
res = False
res_dic = {
"Stat": stat,
self.pvalue_string: p,
self.normal_string: res
}
return res_dic
def check_normality(self):
"""
Check if levene and Shapiro-Wilk test's results were satisfied by the
measure or measures of InferencialStats object.
Returns
-------
Boolean value: True if normality conditions are satisfied.
E.O.C the normality conditions are not satisfied
then it return False.
"""
norm_list = self.dictionary_stats[self.shapiro_string]
res = False
shapiro_norm = all((value) == True for value in norm_list)
if self.dictionary_stats[self.levene_string].get("Homogeneity") and shapiro_norm:
res = True
return res
def homocedasticity(self, measures, alpha):
"""
Use Levene Test with input measures. Then studies the result of
test comparing pvalue against alpha
Parameters
----------
measures :
List of measures to be applied to the Levene's test for
homocedasticity testing.
alpha:
Significance level. By default this value is 0.05 but
it can be a value assigned by user.
Returns
-------
True : If `pvalue > alpha` homogeneity is then satisfied.
False : Homogeneity not satisfied
"""
print("\n\nApplying Levene Test\n")
dict_res = {}
stats, p = levene(*measures)
if p > alpha:
res = True
else:
res = False
dict_res = {
"Homogeneity": res,
"Stats": stats,
self.pvalue_string: p
}
return dict_res
def parametric_test(self, measures, is_paired):
"""
Applies `the best case` parametric tests for the samples `measures` obtained
as parameters.
Parameters
----------
measures : list of measures lists
Applies tests to obtain stadistical features
is_paired : Parameter that indicates if measures are paired or not.
Returns
-------
result : Dictionary
Stadistical features as the results of manipulate data samples
"""
print("Applying parametric test")
arr_measures = np.array(measures, dtype=object)
if len(measures) < 3:
if is_paired:
                t_stats, p_value = stats.ttest_rel(arr_measures[0], arr_measures[1])
print("\n\nRunning T-test \"Between groups\"...\n\n")
self.dictionary_stats[self.test_res] = {
"T-test Between Groups": {
self.stats_string: t_stats,
self.pvalue_string: p_value
}
}
else:
t_stats, p_value = stats.ttest_ind(
arr_measures[0], arr_measures[1])
print("\n\nRunning T-test \"within groups\"...\n\n")
self.dictionary_stats[self.test_res] = {
"T-test Within Groups": {
self.stats_string: t_stats,
self.pvalue_string: p_value
}
}
else:
if is_paired:
df = self.make_dataframe(arr_measures)
aovrm = AnovaRM(df, depvar='datas',
subject='groups', within=['rows']).fit()
print("\n\nRunning One Way ANOVA *Repeated Measures*\n\n")
self.dictionary_stats[self.test_res] = {
"One way ANOVA RM": {
aovrm.summary()
}
}
else:
f_stats, p_value = f_oneway(*arr_measures)
print("\n\nRunning One Way ANOVA\n\n")
self.dictionary_stats[self.test_res] = {
"One way ANOVA": {
self.stats_string: f_stats,
self.pvalue_string: p_value
}
}
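    def make_dataframe(self, arr_measures):
        # NOTE: this helper is referenced by parametric_test() but is not defined in
        # the original source. The sketch below is an assumption: it builds the
        # long-format frame that AnovaRM expects, treating each inner list as one
        # repeated-measures condition ('rows') observed on the same subjects ('groups').
        datas, groups, rows = [], [], []
        for condition, measure in enumerate(arr_measures):
            for subject, value in enumerate(measure):
                datas.append(value)
                groups.append(subject)
                rows.append(condition)
        return pd.DataFrame({"datas": datas, "groups": groups, "rows": rows})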
def non_parametric_test(self, measures, is_paired):
"""
        Applies the most appropriate non-parametric tests to the samples `measures` obtained
        as parameters.
Parameters
----------
measures : list of measures lists.
Applies non-parametric tests to obtain stadistical features.
is_paired : Parameter that indicates if measures are paired or not.
Returns
-------
result : Dictionary
Stadistical features as the results of manipulate data samples
"""
print("Applying Non-parametric test")
arr_measures = np.array(measures, dtype=object)
if len(measures) < 3:
            # Two samples
if is_paired:
                # Paired samples: apply the Wilcoxon signed-rank test
stats, pvalue = wilcoxon(arr_measures[0], arr_measures[1])
print("\n\nRunning Wilcoxon\n\n")
self.dictionary_stats[self.test_res] = {
"Wilcoxon test": {
self.stats_string: stats,
self.pvalue_string: pvalue
}
}
else:
                # Unpaired samples: apply the Mann-Whitney U test
stats, pvalue = mannwhitneyu(
arr_measures[0], arr_measures[1], alternative="two-sided")
print("\n\nRunning Mannwhitneyu\n\n")
self.dictionary_stats[self.test_res] = {
"Mann-Whitney test": {
self.stats_string: stats,
self.pvalue_string: pvalue
}
}
else:
            # Three or more samples
if is_paired:
                # Paired samples: apply the Friedman test
stats, pvalue = friedmanchisquare(*arr_measures)
print("\n\nRunning Friedmanchisquare\n\n")
self.dictionary_stats[self.test_res] = {
"Friedman test": {
self.stats_string: stats,
self.pvalue_string: pvalue
}
}
else:
                # Unpaired samples: apply the Kruskal-Wallis test
stats, pvalue = kruskal(*arr_measures)
print("\n\nRunning Kruskal\n\n")
self.dictionary_stats[self.test_res] = {
"Kruskal-Wallis test": {
self.stats_string: stats,
self.pvalue_string: pvalue
}
}
def crit_diff(self):
"""
Display a graphical analisys comparing critical
differences from each measures
Parameters
----------
self.measures : lists of measures.
self.alpha : Significance level that share all data.
Returns
-------
result : Graphical comparison displaying critical differences
"""
import math
if any(len(measure) >= 25 for measure in self.measures):
print(
"Error: Measure Length.\nUse measures: Each measure must have less than 25 elements")
raise SystemExit()
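        # Two-tailed Bonferroni-Dunn critical values used for the critical-difference
        # computation, indexed by the number of compared groups (alpha = 0.05 and 0.10).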
bon_05 = [0, 1.960, 2.242, 2.394, 2.498, 2.576, 2.639, 2.690, 2.735, 2.773, 2.807, 2.838,
2.866, 2.891, 2.914, 2.936, 2.955, 2.974, 2.992, 3.008, 3.024, 3.038, 3.052, 3.066, 3.078]
bon_10 = [0, 1.645, 1.960, 2.128, 2.242, 2.327, 2.394, 2.450, 2.498, 2.540, 2.576, 2.609,
2.639, 2.666, 2.690, 2.713, 2.735, 2.755, 2.773, 2.791, 2.807, 2.823, 2.838, 2.852, 2.866]
N = len(self.measures)
k = len(self.measures[0])
q0_05 = bon_05[k]
q0_10 = bon_10[k]
cd0_05 = q0_05 * math.sqrt((k*(k+1))/(6*N))
cd0_10 = q0_10 * math.sqrt((k*(k+1))/(6*N))
# Rankings
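        # NOTE: get_ranks is assumed to be an external helper (it is not defined or
        # imported in this file) returning the average rank of each measure, which is
        # what the critical-difference comparison below operates on.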
ranks = get_ranks(self.measures)
print("Average Rankings -> {}".format(ranks))
print("Min Ranking ->{}\n\n".format(min(ranks)))
ids = ["Measure {}".format(data+1)
for data in range(len(self.measures))]
data_df = {
'Measures': ids,
'Ranking': ranks
}
df = | pd.DataFrame(data_df) | pandas.DataFrame |
import heapq
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
def calculate_rating(a, b, c, b1, c1):
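    # Collaborative-filtering style prediction: start from the mean of the target
    # user's vector `a`, then add the cosine-similarity weighted deviations of the
    # two neighbours' ratings (b1, c1) from that mean.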
rating = np.mean(a)
user1 = calculate_consin(a, b) * (b1 - rating)
user2 = calculate_consin(a, c) * (c1 - rating)
rating = rating + ((user1 + user2) / (calculate_consin(a, b) + calculate_consin(a, c)))
return rating
def calculate_consin(a, b):
temp = np.linalg.norm(a) * np.linalg.norm(b)
if temp == 0:
return 0
else:
return np.dot(a, b) / temp
def data_preprocess(path):
df = | pd.read_csv(path) | pandas.read_csv |
# coding: utf-8
import pandas as pd
import numpy as np
import dateutil
import requests
import datetime
from matplotlib import pyplot as plt
def smape(actual, predicted):
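    # Symmetric mean absolute percentage error (sMAPE); terms with a zero
    # denominator are counted as zero instead of dividing by zero.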
a = np.abs(np.array(actual) - np.array(predicted))
b = np.array(actual) + np.array(predicted)
return 2 * np.mean(np.divide(a, b, out=np.zeros_like(a), where=b != 0, casting='unsafe'))
from dateutil.parser import parse
from datetime import date, timedelta
def date_add_hours(start_date, hours):
end_date = parse(start_date) + timedelta(hours=hours)
end_date = end_date.strftime('%Y-%m-%d %H:%M:%S')
return end_date
def date_add_days(start_date, days):
end_date = parse(start_date[:10]) + timedelta(days=days)
end_date = end_date.strftime('%Y-%m-%d')
return end_date
def diff_of_hours(time1, time2):
hours = (parse(time1) - parse(time2)).total_seconds() // 3600
return abs(hours)
utc_date = date_add_hours(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), -8)
print('Current UTC time: {}'.format(utc_date))
print('{} hours remain until the prediction target time'.format(diff_of_hours(date_add_days(utc_date, 1), utc_date) + 1))
filename = "2018-05-31_ensemble_all_zhoujie"
day = "2018-05-31"
filepath = '../image/results/' + "".join(day.split("-")) + '/' + filename + '.csv' # 0514
# filepath = './0513commit/bag_55.csv' #0513
# filepath = './0513commit/api_concat_bag_55_6hours.csv' #0513api
result = pd.read_csv(filepath)
now_date = day
api = False
# # Beijing data
# In[5]:
## Data from 2018-04 up to the latest available
if api:
url = 'https://biendata.com/competition/airquality/bj/2018-04-25-0/2018-06-01-0/2k0d1d8'
respones = requests.get(url)
with open("../image/bj_aq_new_show.csv", 'w') as f:
f.write(respones.text)
# In[6]:
replace_dict = {'wanshouxigong': 'wanshouxig', 'aotizhongxin': 'aotizhongx', 'nongzhanguan': 'nongzhangu',
'fengtaihuayuan': 'fengtaihua',
'miyunshuiku': 'miyunshuik', 'yongdingmennei': 'yongdingme', 'xizhimenbei': 'xizhimenbe'}
# In[7]:
replace_dict = {'wanshouxigong': 'wanshouxig', 'aotizhongxin': 'aotizhongx', 'nongzhanguan': 'nongzhangu',
'fengtaihuayuan': 'fengtaihua',
'miyunshuiku': 'miyunshuik', 'yongdingmennei': 'yongdingme', 'xizhimenbei': 'xizhimenbe'}
bj_aq_new_show = pd.read_csv('../image/bj_aq_new_show.csv')
bj_aq_new_show.columns = ['id', 'stationId', 'utc_time', 'PM2.5', 'PM10', 'NO2', 'CO', 'O3', 'SO2']
bj_aq_new_show = bj_aq_new_show[['stationId', 'utc_time', 'PM2.5', 'PM10', 'O3']]
bj_aq_new_show['location'] = bj_aq_new_show['stationId'].apply(lambda x: x.split('_aq')[0])
bj_aq_new_show['location'] = bj_aq_new_show['location'].replace(replace_dict)
# bj_aq_new_show['utc_time'] = pd.to_datetime(bj_aq_new_show['utc_time'])
bj_aq_new_show = bj_aq_new_show[['utc_time', 'PM2.5', 'PM10', 'O3', 'location']]
bj_aq_new_show.head(2)
# # London data
# In[8]:
## London data from 2018-04 up to the latest available
if api:
url = 'https://biendata.com/competition/airquality/ld/2018-03-30-23/2018-06-01-01/2k0d1d8'
respones = requests.get(url)
with open("../image/lw_aq_new.csv", 'w') as f:
f.write(respones.text)
lw_aq_new = pd.read_csv('../image/lw_aq_new.csv')
lw_aq_new.columns = ['id', 'location', 'utc_time', 'PM2.5', 'PM10', 'NO2', 'CO', 'O3', 'SO2']
lw_aq_new = lw_aq_new[['utc_time', 'PM2.5', 'PM10', 'O3', 'location']]
lw_aq_new.head(2)
# In[9]:
aq_new = pd.concat([bj_aq_new_show, lw_aq_new])
aq_new['utc_time'] = pd.to_datetime(aq_new['utc_time'])
aq_new.head(3)
# In[10]:
bj_aq_new_show.utc_time.max()
# In[11]:
def getlocation(x):
return x.split('_aq')[0].split('#')[0]
hour1 = pd.to_datetime('2018-01-06 01:00:00') - pd.to_datetime('2018-01-06 00:00:00')
result['location'] = result['test_id'].apply(lambda x: getlocation(x))
result['utc_time'] = result['test_id'].apply(lambda x: x.split('#')[1])
result['utc_time'] = result['utc_time'].apply(
lambda x: str( | pd.to_datetime(now_date + ' 00:00:00') | pandas.to_datetime |
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from shap_bootstrap.models import ShapleyModel
from shap_bootstrap import utils
from sklearn.base import RegressorMixin
from sklearn.base import BaseEstimator
from sklearn.impute import SimpleImputer
class BuildingBlockPipeline(BaseEstimator, RegressorMixin):
def __init__(
self, processing_block, explainer_block, cluster_block, ensemble_block
):
# self.description = 'Pipeline model for thesis'
self.processing_block = processing_block
self.explainer_block = explainer_block
self.cluster_block = cluster_block
self.ensemble_block = ensemble_block
self.param_sum = 0
self.n_estimators = 0
self.n_avg_nodes = 0
def fit(self, X, y):
pass
def predict(self, X, y):
pass
def calculate_complexity(self):
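        # Model complexity read from the XGBoost dumps: total node count across all
        # trees, number of boosted trees, and the average number of nodes per tree.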
models = self.ensemble_block.model_dict
if models:
self.param_sum = sum(
[
models["model{}".format(i)].trees_to_dataframe().shape[0]
for i in range(len(models))
]
)
self.n_estimators = sum(
[
np.unique(
models["model{}".format(i)].trees_to_dataframe()["Tree"]
).size
for i in range(len(models))
]
)
self.n_avg_nodes = self.param_sum / self.n_estimators
elif self.explainer_block.base_model is not None:
self.param_sum = self.explainer_block.base_model.trees_to_dataframe().shape[
0
]
self.n_estimators = np.unique(
self.explainer_block.base_model.trees_to_dataframe()["Tree"]
).size
self.n_avg_nodes = self.param_sum / self.n_estimators
else:
self.param_sum = 0
self.n_estimators = 0
self.n_avg_nodes = 0
return self.param_sum, self.n_estimators, self.n_avg_nodes
class B1_Branch_Pipeline(BuildingBlockPipeline):
def __init__(
self, processing_block, explainer_block, cluster_block, ensemble_block
):
self.processing_block = processing_block
self.explainer_block = explainer_block
self.cluster_block = cluster_block
self.ensemble_block = ensemble_block
self.param_sum = 0
self.description = "Data -> Explainer_model -> Prediction"
self.tag = "S1"
def fit(self, X, y):
X, y = self.processing_block.impute_data(X, y)
X_train, X_val, y_train, y_val = self.processing_block.split_data(
X, y, test_split=0.15, scale=True
)
X = pd.DataFrame(X)
self.explainer_block.fit(X, y, X_train, y_train, X_val, y_val)
def predict(self, X):
X = self.processing_block.impute_data(X, scale=True)
# X = pd.DataFrame(X)
y_pred = self.explainer_block.predict(X)
return y_pred
class B2_Branch_Pipeline(BuildingBlockPipeline):
def __init__(
self, processing_block, explainer_block, cluster_block, ensemble_block
):
self.processing_block = processing_block
self.explainer_block = explainer_block
self.cluster_block = cluster_block
self.ensemble_block = ensemble_block
self.param_sum = 0
self.description = "Data -> Cluster -> Ensemble -> Prediction"
self.tag = "S2"
def fit(self, X, y):
X, y = self.processing_block.impute_data(X, y)
self.X_train = X
self.y_train = y
X_train, X_val, y_train, y_val = self.processing_block.split_data(
X, y, test_split=0.15, scale=True
)
cluster_labels = self.cluster_block.cluster_training_instances(X)
X_train.columns = X.columns
X_val.columns = X.columns
self.ensemble_block.train(X_train, X_val, y_train, y_val, cluster_labels)
def predict(self, X):
X = self.processing_block.impute_data(X, scale=True)
cluster_labels_test = self.cluster_block.cluster_test_instances(self.X_train, X)
y_pred = self.ensemble_block.predict(X, cluster_labels_test)
return y_pred
class B4_Branch_Pipeline(BuildingBlockPipeline):
def __init__(
self, processing_block, explainer_block, cluster_block, ensemble_block
):
self.processing_block = processing_block
self.explainer_block = explainer_block
params = {
"eta": 0.1,
"max_depth": 1,
"objective": "reg:squarederror",
"subsample": 0.75,
"eval_metric": "rmse",
"lambda": 0.1,
}
self.explainer_block.keyword_args = {
"num_boost_round": 200,
"verbose_eval": 0,
"evals_result": {},
"early_stopping_rounds": 20,
}
self.explainer_block.explainer_params = params
self.cluster_block = cluster_block
self.ensemble_block = ensemble_block
self.param_sum = 0
self.description = (
"Data -> Explainer -> Shapley -> Cluster -> Ensemble -> Prediction"
)
self.tag = "S6"
def fit(self, X, y):
X, y = self.processing_block.impute_data(X, y)
self.X_train = X
self.y_train = y
X_train, X_val, y_train, y_val = self.processing_block.split_data(
X, y, test_split=0.15, scale=True
)
self.shapley_values = self.explainer_block.fit_transform(
X, y, X_train, y_train, X_val, y_val
)
cluster_labels = self.cluster_block.cluster_training_instances(
self.shapley_values
)
X_train.columns = X.columns
X_val.columns = X.columns
shapley_train = pd.DataFrame(self.shapley_values[X_train.index])
shapley_val = pd.DataFrame(self.shapley_values[X_val.index])
self.ensemble_block.train(
shapley_train, shapley_val, y_train, y_val, cluster_labels
)
def predict(self, X):
X = self.processing_block.impute_data(X, scale=True)
# X = pd.DataFrame(X)
shapley_values_test = self.explainer_block.transform(X)
cluster_labels_test = self.cluster_block.cluster_test_instances(
self.shapley_values, shapley_values_test
)
shapley_values_test = pd.DataFrame(shapley_values_test)
y_pred = self.ensemble_block.predict(shapley_values_test, cluster_labels_test)
return y_pred
class B5_Branch_Pipeline(BuildingBlockPipeline):
def __init__(
self, processing_block, explainer_block, cluster_block, ensemble_block
):
self.processing_block = processing_block
self.explainer_block = explainer_block
params = {
"eta": 0.1,
"max_depth": 1,
"objective": "reg:squarederror",
"subsample": 0.75,
"eval_metric": "rmse",
"lambda": 0.1,
}
self.explainer_block.keyword_args = {
"num_boost_round": 200,
"verbose_eval": 0,
"evals_result": {},
"early_stopping_rounds": 20,
}
self.explainer_block.explainer_params = params
self.cluster_block = cluster_block
self.ensemble_block = ensemble_block
self.param_sum = 0
self.description = "Data -> Explainer -> Shapley -> Cluster ->Map Original Data -> Ensemble -> Prediction"
self.tag = "S7"
def fit(self, X, y):
X, y = self.processing_block.impute_data(X, y)
# X = pd.DataFrame(X)
self.X_train = X
self.y_train = y
X_train, X_val, y_train, y_val = self.processing_block.split_data(
X, y, test_split=0.15, scale=True
)
self.shapley_values = self.explainer_block.fit_transform(
X, y, X_train, y_train, X_val, y_val
)
cluster_labels = self.cluster_block.cluster_training_instances(
self.shapley_values
)
X_train.columns = X.columns
X_val.columns = X.columns
self.ensemble_block.train(X_train, X_val, y_train, y_val, cluster_labels)
def predict(self, X):
X = self.processing_block.impute_data(X, scale=True)
# X = pd.DataFrame(X)
shapley_values_test = self.explainer_block.transform(X)
cluster_labels_test = self.cluster_block.cluster_test_instances(
self.shapley_values, shapley_values_test
)
y_pred = self.ensemble_block.predict(X, cluster_labels_test)
return y_pred
class B7_Branch_Pipeline(BuildingBlockPipeline):
def __init__(
self,
processing_block,
explainer_block,
reduce_block,
cluster_block,
ensemble_block,
):
self.processing_block = processing_block
self.explainer_block = explainer_block
params = {
"eta": 0.1,
"max_depth": 1,
"objective": "reg:squarederror",
"subsample": 0.75,
"eval_metric": "rmse",
"lambda": 0.1,
}
self.explainer_block.keyword_args = {
"num_boost_round": 200,
"verbose_eval": 0,
"evals_result": {},
"early_stopping_rounds": 20,
}
self.explainer_block.explainer_params = params
self.reduce_block = reduce_block
self.cluster_block = cluster_block
self.ensemble_block = ensemble_block
self.param_sum = 0
self.description = "Data -> Explainer -> Shapley -> Reduced-Shapley -> Cluster -> Ensemble -> Prediction"
self.tag = "S10"
def fit(self, X, y):
X, y = self.processing_block.impute_data(X, y)
# X = pd.DataFrame(X)
self.X_train = X
self.y_train = y
X_train, X_val, y_train, y_val = self.processing_block.split_data(
X, y, test_split=0.15, scale=True
)
self.shapley_values = self.explainer_block.fit_transform(
X, y, X_train, y_train, X_val, y_val
)
self.shapley_values_reduced = self.reduce_block.fit_transform(
self.shapley_values
)
cluster_labels = self.cluster_block.cluster_training_instances(
self.shapley_values_reduced
)
X_train.columns = X.columns
X_val.columns = X.columns
shapley_train = pd.DataFrame(self.shapley_values_reduced[X_train.index])
shapley_val = pd.DataFrame(self.shapley_values_reduced[X_val.index])
self.ensemble_block.train(
shapley_train, shapley_val, y_train, y_val, cluster_labels
)
def predict(self, X):
X = self.processing_block.impute_data(X, scale=True)
# X = pd.DataFrame(X)
shapley_values_test = self.explainer_block.transform(X)
shapley_values_test_reduced = self.reduce_block.transform(shapley_values_test)
cluster_labels_test = self.cluster_block.cluster_test_instances(
self.shapley_values_reduced, shapley_values_test_reduced
)
shapley_values_test_reduced = pd.DataFrame(shapley_values_test_reduced)
y_pred = self.ensemble_block.predict(
shapley_values_test_reduced, cluster_labels_test
)
return y_pred
class B8_Branch_Pipeline(BuildingBlockPipeline):
def __init__(
self,
processing_block,
explainer_block,
reduce_block,
cluster_block,
ensemble_block,
):
self.processing_block = processing_block
self.explainer_block = explainer_block
params = {
"eta": 0.1,
"max_depth": 1,
"objective": "reg:squarederror",
"subsample": 0.75,
"eval_metric": "rmse",
"lambda": 0.1,
}
self.explainer_block.keyword_args = {
"num_boost_round": 200,
"verbose_eval": 0,
"evals_result": {},
"early_stopping_rounds": 20,
}
self.explainer_block.explainer_params = params
self.reduce_block = reduce_block
self.cluster_block = cluster_block
self.ensemble_block = ensemble_block
self.param_sum = 0
self.description = "Data -> Explainer -> Shapley -> Reduced-Shapley -> Cluster ->Map Original Data -> Ensemble -> Prediction"
self.tag = "S11"
def fit(self, X, y):
X, y = self.processing_block.impute_data(X, y)
# X = pd.DataFrame(X)
self.X_train = X
self.y_train = y
X_train, X_val, y_train, y_val = self.processing_block.split_data(
X, y, test_split=0.15, scale=True
)
self.shapley_values = self.explainer_block.fit_transform(
X, y, X_train, y_train, X_val, y_val
)
self.shapley_values_reduced = self.reduce_block.fit_transform(
self.shapley_values
)
self.cluster_labels = self.cluster_block.cluster_training_instances(
self.shapley_values_reduced
)
X_train.columns = X.columns
X_val.columns = X.columns
self.ensemble_block.train(X_train, X_val, y_train, y_val, self.cluster_labels)
def predict(self, X):
X = self.processing_block.impute_data(X, scale=True)
# X = pd.DataFrame(X)
shapley_values_test = self.explainer_block.transform(X)
shapley_values_test_reduced = self.reduce_block.transform(shapley_values_test)
self.cluster_labels_test = self.cluster_block.cluster_test_instances(
self.shapley_values_reduced, shapley_values_test_reduced
)
y_pred = self.ensemble_block.predict(X, self.cluster_labels_test)
return y_pred
class CustomPipelineModel(BaseEstimator, RegressorMixin):
"""
:description : Pipeline model for thesis
"""
def __init__(self, notebook_mode, explainer_type, ensemble_type, nClusters):
self.description = "Pipeline model for thesis"
self.notebook_mode = notebook_mode
self.explainer_type = explainer_type
self.ensemble_type = ensemble_type
self.nClusters = nClusters
self.shap_model = ShapleyModel(
explainer_type, ensemble_type, nClusters, notebook_mode
)
self.imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
self.pipeline = Pipeline(steps=[("Shapley-Blackbox", self)])
def fit(self, X, y):
(
X_train,
shap_dataframe,
original_split,
y_org,
original_split_shapley,
y_shap,
kmeans_original,
kmeans_shapley,
) = self.prepareData(X, y)
self.original_labels = kmeans_original.labels_
self.shapley_labels = kmeans_shapley.labels_
self.X = X_train
self.shap_values = shap_dataframe
(
self.model_dict_shapley,
self.eval_results_shapley,
) = self.shap_model.trainPredictor(original_split_shapley, y_shap)
# Use if task is classification
self.classes_ = np.unique(y)
return self
def predict(self, X):
# X,_ = processing.clear_nan(X,None)
X = self.imputer.fit_transform(X)
X = self.X_scaler.transform(X)
shapley_test = self.shap_model.predictShapleyValues(X)
shapley_test_df = pd.DataFrame(shapley_test)
data_dict_shapley_test = self.shap_model.clusterDataTest(
self.shap_values, self.shapley_labels, shapley_test_df
)
original_split_shapley_test = self.shap_model.prepareTestData(
data_dict_shapley_test, X, shapley_test_df
)
predictions = original_split_shapley_test.apply(
lambda x: self.shap_model.predictRow(x, self.model_dict_shapley), axis=1
)
return predictions
def prepareData(self, X, y):
X, y = utils.clear_nan(X, y)
(
X_train,
X_train_tr,
X_train_val,
X_test,
y_train,
y_train_tr,
y_train_val,
y_test,
self.X_scaler,
) = utils.prepare_pipeline_data(X, y)
shap_values = self.shap_model.explainShapley(
X_train, y_train, X_train_tr, y_train_tr, X_train_val, y_train_val
)
shap_dataframe = | pd.DataFrame(data=shap_values) | pandas.DataFrame |
#! /anaconda3/envs/splitwavepy/bin/python
# Welcome to discre.py. This script (or maybe module??) is for testing whether
# SKS SKKS pairs exhibit matching or discrepent splitting. This will be done
# by using the splitting measured for each phase to correct the splitting on
# the other.
### Imports
import numpy as np
import pandas as pd
import obspy as ob
import sys
import os
from os import path
#import splitwavepy as sw
import matplotlib.pyplot as plt
from stack import Stacker,plot_stack
from glob import glob
from datetime import datetime
from matplotlib import gridspec
# Maybe some others
###############################################################################
class Tester:
def __init__(self,pr,path,mode='man'):
'''
Tester contains data and functions to Stack SKS and SKKS lambda2 surfaces in order to test for discrepancy
        pr - Full path to the .pairs file (if stacking has not been done). If a .pairs file is passed in, then discre.py will automatically
        attempt to stack the LamR surfaces.
        Alternatively a .stk file can be provided if you just want to plot already existing stacks. In that case discre.py will
        assume that stacking has been done and will not attempt any stacking.
path - Path to the corresponding Runs directory where the .LamR files exist.
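        e.g. Tester('/path/to/results/all.pairs', '/path/to/Runs') - both paths here are purely illustrative.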
'''
self.path =path
date_time_convert = {'TIME': lambda x: str(x).zfill(4),'DATE': lambda x : str(x)}
# print(pr.split('.')[0])
if pr.split('.')[-1] == 'pairs':
# print('Hello')
#File extention is .pairs, lam2 values dont exist so we need to run the stack
self.pairs = | pd.read_csv(pr,delim_whitespace=True,converters=date_time_convert) | pandas.read_csv |
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
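        # With all rates and fixed fees zeroed out, every trade below is
        # expected to be charged exactly the minimum fee (300), and the
        # purchase quantity is based on the cash left after that fee,
        # e.g. (20000 - 300) / 20 = 985 shares for the moq = 0 case
        # (assuming the price of 20 set for this asset in setUp).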
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
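        # Both a rate and a minimum fee are set here; the fee charged is
        # presumably the larger of the two (rate * trade_value vs. the
        # minimum), and the assertions below cover cases on both sides of
        # that threshold.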
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
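        # With rates and minimums zeroed out, every transaction should be
        # charged the flat amount only: 200 per purchase and 150 per sale,
        # regardless of trade size or moq.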
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
                      f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
        print('\npurchase result of fixed cost with fixed fee = 200 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
        print('\npurchase result of fixed cost with fixed fee = 200 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
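        # Slippage worsens the effective execution price on top of the rate
        # fee; the exact price-impact formula is implemented inside the cost
        # object (self.r), so the assertions below simply pin down the
        # reference results for slipage = 1E-9.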
        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
        print('\nselling result with fixed rate = 0.001 and slipage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
                      f'{test_fixed_fee_result[0]} does not equal to [0, 0, -3333.3333]')
        self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
                               msg=f'{test_fixed_fee_result[1]} does not equal to 33298.88855591')
        self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
                               msg=f'{test_fixed_fee_result[2]} does not equal to 34.44444409')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
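        # Axis-type rules exercised in this test: a 2-tuple of numbers is a
        # range axis ('conti' for float bounds, 'discr' for ints), longer
        # tuples are enumerations ('enum'), and unrecognised type strings
        # fall back to 'enum'.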
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
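        # 'interval' extraction samples each axis on a grid with the given
        # step (3 here, i.e. 0/3/6/9 on a [0, 10] discr axis) and returns the
        # cartesian product, hence the 16 points checked below; 'rand'
        # extraction draws the requested number of random points instead.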
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
        self.assertIsInstance(extracted_int_list2[0][1], (int, np.int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
            self.assertIsInstance(point[1], (int, np.int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
        it = list(zip(extracted_int_list4, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]))
        for item, item2 in it:
            print(item, item2)
        self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
        for item, item2 in extracted_int_list5:
            print(item, item2)
            self.assertIn(item, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)])
            self.assertIn(item2, [1, 2, 3, 4])
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
        # All points have been extracted; build ten sub-spaces around 10 of them,
        # check that each is a Space contained in s, then extract with step 32 and verify the point count
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
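        # from_point() is expected to span point +/- distance on each axis,
        # clipped to the parent space's bounds, which the boes/size/shape
        # assertions below verify.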
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
        # test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
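        # closing_value presumably compounds each investment at the plan's ir
        # up to the last investment date, e.g. for cp1:
        # 20000 * 1.1 ** 2 + 10000 = 34200.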
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
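        # Operator semantics exercised below: adding two plans merges their
        # dates (amounts on identical dates are summed), adding an int adds
        # that amount to every investment, multiplying by a number scales the
        # amounts, and left-multiplying by an int (n * plan) repeats the whole
        # plan n times, extending the investment dates forward.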
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
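        # ResultPool(5) accumulates (item, perf) pairs; cut() trims the pool
        # back to its capacity, keeping the entries with the largest perf by
        # default, or the smallest when keep_largest=False.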
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
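        # space_around_centre() builds a sub-space centred on the given point,
        # extending by `radius` on each axis and clipped to the parent bounds;
        # enum axes are presumably left untouched unless ignore_enums=False,
        # as the last assertions of this test show.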
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板", and list date after "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list date after "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
    # The manually calculated reference results below are kept in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
        # Build a test series of 500 data points to exercise the evaluation process when there are more than 250 points
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
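        # eval_fv() returns the final value of the 'value' column (each
        # expected number below is the last element of the corresponding test
        # series); an empty DataFrame yields -np.inf.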
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
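        # eval_max_drawdown() is expected to return a 4-tuple of
        # (max drawdown ratio, peak index, valley index, recovery index),
        # with NaN as the recovery index when the series never regains the peak.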
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test max drawdown == 0:
# TODO: investigate: how does divide by zero change?
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# Test volatility calculation on the long data set
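# The expected array below mirrors the pre-computed 'volatility' column of
# self.long_data: the 250 leading NaN values cover the rolling warm-up window,
# and eval_volatility() is expected to return the nanmean of this series, as
# asserted right after the array.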
expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514,
0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286,
0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487,
0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# Test Sharpe ratio calculation on the long data set
expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238,
-0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977,
0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706,
0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# Test beta calculation on the long data set
expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
# Test alpha calculation on the long data set
expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215,
-0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115,
0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003,
0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
# Carefully designed simulated share names, trading dates, and share prices
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
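# Price matrix: 100 trading days (rows, aligned with self.dates) x 7 shares (columns)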
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
# Carefully designed simulated PT (position target) holding signals:
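# How to read the PT signals (interpretation, consistent with the PT backtest
# results further below): each row is a set of target position weights for the
# 7 shares on that day, and the backtest adjusts holdings toward those weights.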
self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]])
# Carefully designed simulated PS (proportion) trading signals, closely resembling the simulated PT signals
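# How to read the PS signals (interpretation): positive values buy with that
# proportion of available assets, negative values sell that proportion of the
# current holding (-1.000 clears the position); 0 means no trade on that day.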
self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000],
[0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000],
[0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000],
[-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]])
# Carefully designed simulated VS (volume) trading signals, similar to the simulated PS signals
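# How to read the VS signals (interpretation): values are share quantities,
# positive to buy and negative to sell; 000 is simply a zero written with
# extra digits to keep the columns aligned.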
self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 300, 300],
[400, 400, 000, 000, 000, 000, 000],
[000, 000, 250, 000, 000, 000, 000],
[000, 000, 000, 000, -400, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, -200, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, -300],
[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 300, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 400, 000, -300, 600, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[600, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, -400, 600],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 500, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 300, 000, 000],
[-500, 000, 000, 500, 000, 200, 000],
[000, 000, 000, 000, 000, 000, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, -700, 000, -600, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-400, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, -600, 000, 300],
[000, 000, 000, 000, 000, 000, 000],
[000, -300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 700, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000]])
# Carefully designed simulated multi-price trading signals: 50 trading days of operations on three shares
self.multi_shares = ['000010', '000030', '000039']
self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08']
self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates]
# The trading prices operated on include the open, high and close prices
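# Each price matrix below has 50 rows (aligned with self.multi_dates) and 3
# columns (one per share in self.multi_shares); np.nan marks days without a
# usable quote, presumably suspended trading days.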
self.multi_prices_open = np.array([[10.02, 9.88, 7.26],
[10.00, 9.88, 7.00],
[9.98, 9.89, 6.88],
[9.97, 9.75, 6.91],
[9.99, 9.74, np.nan],
[10.01, 9.80, 6.81],
[10.04, 9.62, 6.63],
[10.06, 9.65, 6.45],
[10.06, 9.58, 6.16],
[10.11, 9.67, 6.24],
[10.11, 9.81, 5.96],
[10.07, 9.80, 5.97],
[10.06, 10.00, 5.96],
[10.09, 9.95, 6.20],
[10.03, 10.10, 6.35],
[10.02, 10.06, 6.11],
[10.06, 10.14, 6.37],
[10.08, 9.90, 5.58],
[9.99, 10.20, 5.65],
[10.00, 10.29, 5.65],
[10.03, 9.86, 5.19],
[10.02, 9.48, 5.42],
[10.06, 10.01, 6.30],
[10.03, 10.24, 6.15],
[9.97, 10.26, 6.05],
[9.94, 10.24, 5.89],
[9.83, 10.12, 5.22],
[9.78, 10.65, 5.20],
[9.77, 10.64, 5.07],
[9.91, 10.56, 6.04],
[9.92, 10.42, 6.12],
[9.97, 10.43, 5.85],
[9.91, 10.29, 5.67],
[9.90, 10.30, 6.02],
[9.88, 10.44, 6.04],
[9.91, 10.60, 7.07],
[9.63, 10.67, 7.64],
[9.64, 10.46, 7.99],
[9.57, 10.39, 7.59],
[9.55, 10.90, 8.73],
[9.58, 11.01, 8.72],
[9.61, 11.01, 8.97],
[9.62, np.nan, 8.58],
[9.55, np.nan, 8.71],
[9.57, 10.82, 8.77],
[9.61, 11.02, 8.40],
[9.63, 10.96, 7.95],
[9.64, 11.55, 7.76],
[9.61, 11.74, 8.25],
[9.56, 11.80, 7.51]])
self.multi_prices_high = np.array([[10.07, 9.91, 7.41],
[10.00, 10.04, 7.31],
[10.00, 9.93, 7.14],
[10.00, 10.04, 7.00],
[10.03, 9.84, np.nan],
[10.03, 9.88, 6.82],
[10.04, 9.99, 6.96],
[10.09, 9.70, 6.85],
[10.10, 9.67, 6.50],
[10.14, 9.71, 6.34],
[10.11, 9.85, 6.04],
[10.10, 9.90, 6.02],
[10.09, 10.00, 6.12],
[10.09, 10.20, 6.38],
[10.10, 10.11, 6.43],
[10.05, 10.18, 6.46],
[10.07, 10.21, 6.43],
[10.09, 10.26, 6.27],
[10.10, 10.38, 5.77],
[10.00, 10.47, 6.01],
[10.04, 10.42, 5.67],
[10.04, 10.07, 5.67],
[10.06, 10.24, 6.35],
[10.09, 10.27, 6.32],
[10.05, 10.38, 6.43],
[9.97, 10.43, 6.36],
[9.96, 10.39, 5.79],
[9.86, 10.65, 5.47],
[9.77, 10.84, 5.65],
[9.92, 10.65, 6.04],
[9.94, 10.73, 6.14],
[9.97, 10.63, 6.23],
[9.97, 10.51, 5.83],
[9.92, 10.35, 6.25],
[9.92, 10.46, 6.27],
[9.92, 10.63, 7.12],
[9.93, 10.74, 7.82],
[9.64, 10.76, 8.14],
[9.58, 10.54, 8.27],
[9.60, 11.02, 8.92],
[9.58, 11.12, 8.76],
[9.62, 11.17, 9.15],
[9.62, np.nan, 8.90],
[9.64, np.nan, 9.01],
[9.59, 10.92, 9.16],
[9.62, 11.15, 9.00],
[9.63, 11.11, 8.27],
[9.70, 11.55, 7.99],
[9.66, 11.95, 8.33],
[9.64, 11.93, 8.25]])
self.multi_prices_close = np.array([[10.04, 9.68, 6.64],
[10.00, 9.87, 7.26],
[10.00, 9.86, 7.03],
[9.99, 9.87, 6.87],
[9.97, 9.79, np.nan],
[9.99, 9.82, 6.64],
[10.03, 9.80, 6.85],
[10.03, 9.66, 6.70],
[10.06, 9.62, 6.39],
[10.06, 9.58, 6.22],
[10.11, 9.69, 5.92],
[10.09, 9.78, 5.91],
[10.07, 9.75, 6.11],
[10.06, 9.96, 5.91],
[10.09, 9.90, 6.23],
[10.03, 10.04, 6.28],
[10.03, 10.06, 6.28],
[10.06, 10.08, 6.27],
[10.08, 10.24, 5.70],
[10.00, 10.24, 5.56],
[9.99, 10.24, 5.67],
[10.03, 9.86, 5.16],
[10.03, 10.13, 5.69],
[10.06, 10.12, 6.32],
[10.03, 10.10, 6.14],
[9.97, 10.25, 6.25],
[9.94, 10.24, 5.79],
[9.83, 10.22, 5.26],
[9.77, 10.75, 5.05],
[9.84, 10.64, 5.45],
[9.91, 10.56, 6.06],
[9.93, 10.60, 6.21],
[9.96, 10.42, 5.69],
[9.91, 10.25, 5.46],
[9.91, 10.24, 6.02],
[9.88, 10.49, 6.69],
[9.91, 10.57, 7.43],
[9.64, 10.63, 7.72],
[9.56, 10.48, 8.16],
[9.57, 10.37, 7.83],
[9.55, 10.96, 8.70],
[9.57, 11.02, 8.71],
[9.61, np.nan, 8.88],
[9.61, np.nan, 8.54],
[9.55, 10.88, 8.87],
[9.57, 10.87, 8.87],
[9.63, 11.01, 8.18],
[9.64, 11.01, 7.80],
[9.65, 11.58, 7.97],
[9.62, 11.80, 8.25]])
# The trading signals come in three groups, applied to the open, high and close prices respectively
# The key point here is the handling of the stock delivery period: when it is not 0, delivery is settled in units of trading days
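# Sketch of the delivery-period idea (an assumption about the loop semantics):
# with a delivery period of 1 trading day, shares bought today only become
# available for selling on the next row, and cash from a sale is likewise
# credited one row later; with a period of 0 both settle on the same row.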
self.multi_signals = []
# The first group of multi-signals applies to the open price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.000, 0.000],
[0.000, -0.500, 0.000],
[0.000, -0.500, 0.000],
[0.000, 0.000, 0.000],
[0.150, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.300, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.300],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.350, 0.250],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.100, 0.000, 0.350],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.050, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The second group of signals applies to the high price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.150, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, -0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.200],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The third group of signals applies to the close price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -0.800],
[0.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.750, 0.000, 0.000],
[0.000, 0.000, -0.850],
[0.000, 0.000, 0.000],
[0.000, -0.700, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -1.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-1.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The backtest also needs three groups of prices: open, high and close
self.multi_histories = []
# The first price history frame holds the open prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_open,
columns=self.multi_shares,
index=self.multi_dates
)
)
# The second price history frame holds the high prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_high,
columns=self.multi_shares,
index=self.multi_dates
)
)
# The third price history frame holds the close prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_close,
columns=self.multi_shares,
index=self.multi_dates
)
)
# Set up the backtest parameters
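# The CashPlan injects 10,000 on each of the three listed dates (30,000 in
# total). self.rate models a cost-free market, while self.rate2 differs only
# by adding minimum transaction fees (10 per buy, 5 per sell).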
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
self.pt_signal_hp = dataframe_to_hp(
pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.ps_signal_hp = dataframe_to_hp(
pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.vs_signal_hp = dataframe_to_hp(
pd.DataFrame(self.vs_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_signal_hp = stack_dataframes(
self.multi_signals,
stack_along='htypes',
htypes='open, high, close'
)
self.history_list = dataframe_to_hp(
pd.DataFrame(self.prices, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_history_list = stack_dataframes(
self.multi_histories,
stack_along='htypes',
htypes='open, high, close'
)
# Simulated backtest results for the PT signals
# PT signals, sell before buy, delivery period of 0
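# Reading the result rows (layout inferred from the first row): the first seven
# columns are the per-share holdings, then cash, a column that stays 0 in this
# scenario (its meaning is not spelled out here), and the total asset value.
# For example, row 0 holds 555.5556 shares of share5 (2,500 at a price of 4.50)
# plus 7,500 in cash, for a total of 10,000.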
self.pt_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500],
[2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]])
        # PT signal: buy first, then sell; delivery period 0
self.pt_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21390.2918],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611],
[2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742],
[0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917],
[0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692],
[644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080],
[644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37719.9840],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]])
        # PT signal: sell first, then buy; delivery period 2 days (stock) / 0 days (cash),
        # so the cash freed by selling can be used for purchases on the same day
self.pt_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22704.826],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545],
[2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888],
[578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261],
[0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 40145.531],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]])
        # PT signal: buy first, then sell; delivery period 2 days (stock) / 1 day (cash)
self.pt_res_bs21 = np.array([
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
        # expected results of the simulated PS-signal backtests
        # PS signal: sell first, then buy; delivery period 0
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346],
             [1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000, 33323.8359],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35708.2608],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]])
        # PS signal: buy first, then sell; delivery period 0
self.ps_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21969.5329],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913],
[1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38574.0544],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]])
        # PS signal, sell before buy, delivery period: 2 days (stocks) / 1 day (cash)
self.ps_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]])
        # PS signal, buy before sell, delivery period: 2 days (stocks) / 1 day (cash)
self.ps_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111],
[351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961],
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495],
[117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21775.215],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297],
[1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 36266.530],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]])
        # Simulated backtest results for VS signals
        # VS signal, sell before buy, delivery period: 0
self.vs_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33870.4703],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34014.3010],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34680.5671],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33890.9945],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34004.6640],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34127.7768],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33504.6236],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33652.1318],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35557.5191],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35669.7128],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35211.4466],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]])
        # VS signal, buy before sell, delivery period: 0
self.vs_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]])
        # VS signal, sell before buy, delivery period: 2 days (stocks) / 1 day (cash)
self.vs_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
        # VS signal, buy before sell, delivery period: 2 days (stocks) / 1 day (cash)
self.vs_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
        # Multi-signal processing results, sell before buy, re-using the cash freed by sales
        # for purchases, delivery period: 2 days (stocks) / 0 days (cash)
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])
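    # Fixture naming used throughout this class (an interpretation inferred from the
    # setUp comments above, not an authoritative spec): pt/ps/vs is the signal type
    # (proportion target / proportion signal / volume signal), sb/bs is the trade
    # sequence (sell-first / buy-first), and the two digits are the stock and cash
    # delivery periods, e.g. pt_res_sb20 = PT signals, sell first, 2-day stock
    # delivery, 0-day cash delivery.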
def test_loop_step_pt_sb00(self):
""" test loop step PT-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
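        # sanity check of the day-1 arithmetic: 10000 - 7500 = 2500 is spent (fee
        # included) on 555.5556 units of the fifth asset, i.e. roughly 4.5 per unit.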
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[2][7],
own_amounts=self.pt_res_sb00[2][0:7],
available_cash=self.pt_res_sb00[2][7],
available_amounts=self.pt_res_sb00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[2][7] + c_g + c_s
amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[30][7],
own_amounts=self.pt_res_sb00[30][0:7],
available_cash=self.pt_res_sb00[30][7],
available_amounts=self.pt_res_sb00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[30][7] + c_g + c_s
amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[59][7] + 10000,
own_amounts=self.pt_res_sb00[59][0:7],
available_cash=self.pt_res_sb00[59][7] + 10000,
available_amounts=self.pt_res_sb00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[95][7],
own_amounts=self.pt_res_sb00[95][0:7],
available_cash=self.pt_res_sb00[95][7],
available_amounts=self.pt_res_sb00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[96][7] + c_g + c_s
amounts = self.pt_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))
def test_loop_step_pt_bs00(self):
""" test loop step PT-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[2][7],
own_amounts=self.pt_res_bs00[2][0:7],
available_cash=self.pt_res_bs00[2][7],
available_amounts=self.pt_res_bs00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[2][7] + c_g + c_s
amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[30][7],
own_amounts=self.pt_res_bs00[30][0:7],
available_cash=self.pt_res_bs00[30][7],
available_amounts=self.pt_res_bs00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[30][7] + c_g + c_s
amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[59][7] + 10000,
own_amounts=self.pt_res_bs00[59][0:7],
available_cash=self.pt_res_bs00[59][7] + 10000,
available_amounts=self.pt_res_bs00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[95][7],
own_amounts=self.pt_res_bs00[95][0:7],
available_cash=self.pt_res_bs00[95][7],
available_amounts=self.pt_res_bs00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[96][7] + c_g + c_s
amounts = self.pt_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))
def test_loop_step_ps_sb00(self):
""" test loop step PS-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[2][7],
own_amounts=self.ps_res_sb00[2][0:7],
available_cash=self.ps_res_sb00[2][7],
available_amounts=self.ps_res_sb00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[2][7] + c_g + c_s
amounts = self.ps_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[30][7],
own_amounts=self.ps_res_sb00[30][0:7],
available_cash=self.ps_res_sb00[30][7],
available_amounts=self.ps_res_sb00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[30][7] + c_g + c_s
amounts = self.ps_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[59][7] + 10000,
own_amounts=self.ps_res_sb00[59][0:7],
available_cash=self.ps_res_sb00[59][7] + 10000,
available_amounts=self.ps_res_sb00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[95][7],
own_amounts=self.ps_res_sb00[95][0:7],
available_cash=self.ps_res_sb00[95][7],
available_amounts=self.ps_res_sb00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[96][7] + c_g + c_s
amounts = self.ps_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[97][0:7]))
def test_loop_step_ps_bs00(self):
""" test loop step PS-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[2][7],
                                                     own_amounts=self.ps_res_bs00[2][0:7],
available_cash=self.ps_res_bs00[2][7],
available_amounts=self.ps_res_bs00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[2][7] + c_g + c_s
amounts = self.ps_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[30][7],
                                                     own_amounts=self.ps_res_bs00[30][0:7],
available_cash=self.ps_res_bs00[30][7],
available_amounts=self.ps_res_bs00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[30][7] + c_g + c_s
amounts = self.ps_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[59][7] + 10000,
own_amounts=self.ps_res_bs00[59][0:7],
available_cash=self.ps_res_bs00[59][7] + 10000,
available_amounts=self.ps_res_bs00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[95][7],
own_amounts=self.ps_res_bs00[95][0:7],
available_cash=self.ps_res_bs00[95][7],
available_amounts=self.ps_res_bs00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[96][7] + c_g + c_s
amounts = self.ps_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[97][0:7]))
def test_loop_step_vs_sb00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[2][7],
own_amounts=self.vs_res_sb00[2][0:7],
available_cash=self.vs_res_sb00[2][7],
available_amounts=self.vs_res_sb00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[2][7] + c_g + c_s
amounts = self.vs_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[30][7],
own_amounts=self.vs_res_sb00[30][0:7],
available_cash=self.vs_res_sb00[30][7],
available_amounts=self.vs_res_sb00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[30][7] + c_g + c_s
amounts = self.vs_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[59][7] + 10000,
own_amounts=self.vs_res_sb00[59][0:7],
available_cash=self.vs_res_sb00[59][7] + 10000,
available_amounts=self.vs_res_sb00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[95][7],
own_amounts=self.vs_res_sb00[95][0:7],
available_cash=self.vs_res_sb00[95][7],
available_amounts=self.vs_res_sb00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[96][7] + c_g + c_s
amounts = self.vs_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[97][0:7]))
def test_loop_step_vs_bs00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[2][7],
own_amounts=self.vs_res_bs00[2][0:7],
available_cash=self.vs_res_bs00[2][7],
available_amounts=self.vs_res_bs00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[2][7] + c_g + c_s
amounts = self.vs_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[30][7],
own_amounts=self.vs_res_bs00[30][0:7],
available_cash=self.vs_res_bs00[30][7],
available_amounts=self.vs_res_bs00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[30][7] + c_g + c_s
amounts = self.vs_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[59][7] + 10000,
own_amounts=self.vs_res_bs00[59][0:7],
available_cash=self.vs_res_bs00[59][7] + 10000,
available_amounts=self.vs_res_bs00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[95][7],
own_amounts=self.vs_res_bs00[95][0:7],
available_cash=self.vs_res_bs00[95][7],
available_amounts=self.vs_res_bs00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[96][7] + c_g + c_s
amounts = self.vs_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[97][0:7]))
def test_loop_pt(self):
""" Test looping of PT proportion target signals, with
stock delivery delay = 0 days
cash delivery delay = 0 day
buy-sell sequence = sell first
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 0 days \n'
'cash delivery delay = 0 day \n'
'buy-sell sequence = sell first')
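        # Every test_loop_* method below follows the same pattern: run apply_loop over
        # the pre-built signal and price history, compare the resulting DataFrame with
        # the pre-computed fixture, check that incompatible moq_buy / moq_sell
        # combinations raise AssertionError, then re-run with moq_buy=100 / moq_sell=1
        # as a smoke test.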
res = apply_loop(op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.pt_res_bs00, 2))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=0,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 day \n'
              'maximize cash usage = True \n'
              'applicable because cash delivery period == 0')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.ps_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
        print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
        print('Test looping of PS proportion signals, with:\n'
              'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 day \n'
              'maximize cash usage = True \n'
              'applicable because cash delivery period == 0')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs(self):
""" Test looping of VS Volume Signal type of signals
"""
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.vs_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
        print('Test looping of VS volume signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
        print('Test looping of VS volume signals, with:\n'
              'stock delivery delay = 2 days \n'
              'cash delivery delay = 0 day \n'
              'maximize cash usage = True \n'
              'applicable because cash delivery period == 0')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_multiple_signal(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=True,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.multi_res[i]))
print()
self.assertTrue(np.allclose(res, self.multi_res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=False,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestStrategy(unittest.TestCase):
""" test all properties and methods of strategy base class"""
def setUp(self) -> None:
pass
class TestLSStrategy(RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
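        # hist_data columns follow data_types='close, open, high, low'; after the
        # transpose h[0]..h[3] are the close/open/high/low series, their mean is a
        # rough OHLC average and ma is its n-day simple moving average.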
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
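        # avg: mean price of each share over the whole window; dif: day-over-day change
        # of the close column (index 2 given data_types='high, low, close'); difper:
        # the last available change divided by the average price; the two largest
        # values (argsort()[1:]) each receive a 50% weight.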
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
# TODO: This strategy is not working, find out why and improve
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
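        # note: with data_types='close, low, open', column 0 is the close price, and
        # argsort()[0:2] keeps the two *smallest* difper values (ascending sort),
        # unlike TestSelStrategy above which keeps the two largest.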
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
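        # with data_types='close, open, high, low': h[0]=close, h[1]=open, h[2]=high,
        # h[3]=low; ratio = |C-O| / |H-L| flags doji-like candles and diff is the
        # day-over-day close change tested against the price1/price2 thresholds.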
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class MyStg(qt.RollingTiming):
"""自定义双均线择时策略策略"""
def __init__(self):
"""这个均线择时策略只有三个参数:
- SMA 慢速均线,所选择的股票
- FMA 快速均线
- M 边界值
策略的其他说明
"""
"""
        List of the key strategy parameters that must be initialized:
"""
super().__init__(
pars=(20, 100, 0.01),
par_count=3,
par_types=['discr', 'discr', 'conti'],
par_bounds_or_enums=[(10, 250), (10, 250), (0.0, 0.5)],
stg_name='CUSTOM ROLLING TIMING STRATEGY',
stg_text='Customized Rolling Timing Strategy for Testing',
data_types='close',
window_length=100,
)
print(f'=====================\n====================\n'
f'custom strategy initialized, \npars: {self.pars}\npar_count:{self.par_count}\npar_types:'
f'{self.par_types}\n'
f'{self.info()}')
    # The concrete implementation of the strategy is written in its _realize() method.
    # This method always takes two arguments: hist_price, the historical data of the given portfolio, and params, the concrete strategy parameters.
def _realize(self, hist_price, params):
"""策略的具体实现代码:
s:短均线计算日期;l:长均线计算日期;m:均线边界宽度;hesitate:均线跨越类型"""
f, s, m = params
        # Temporary workaround: slice the incoming data inside the strategy implementation; eventually the slicing should happen outside the implementation so that the data passed in already matches the data_types parameter
h = hist_price.T
        # current values of the long (slow) and short (fast) moving averages
        s_ma = qt.sma(h[0], s)[-1]
        f_ma = qt.sma(h[0], f)[-1]
        # stop band around the slow moving average; when the fast MA lies inside this band, close the position and emit no buy/sell signal
s_ma_u = s_ma * (1 + m)
s_ma_l = s_ma * (1 - m)
        # produce the long / short / empty flag depending on where the fast MA sits
        if f_ma > s_ma_u:  # fast MA above the slow-MA stop band: hold a long position
            return 1
        elif s_ma_l < f_ma < s_ma_u:  # fast MA inside the stop band: close the position
            return 0
        else:  # f_ma < s_ma_l, fast MA below the slow-MA stop band: hold a short position
return -1
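# Note: MyStg._realize unpacks its parameters as (f, s, m) = (fast window, slow window,
# band width), matching the defaults pars=(20, 100, 0.01) declared in __init__, and
# returns 1 / 0 / -1 for long / flat / short positions.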
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
        print('start testing Operator object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values in some days
# for some share_pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
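# the price test arrays are laid out as (share, day, price-type): 3 shares x 50 trading days x 4 price types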
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(strategies='dma', signal_type='PS')
self.op2 = qt.Operator(strategies='dma, macd, trix')
def test_init(self):
""" test initialization of Operator class"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.signal_type, 'pt')
self.assertIsInstance(op.strategies, list)
self.assertEqual(len(op.strategies), 0)
op = qt.Operator('dma')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies, list)
self.assertIsInstance(op.strategies[0], TimingDMA)
op = qt.Operator('dma, macd')
self.assertIsInstance(op, qt.Operator)
op = qt.Operator(['dma', 'macd'])
self.assertIsInstance(op, qt.Operator)
def test_repr(self):
""" test basic representation of Opeartor class"""
op = qt.Operator()
self.assertEqual(op.__repr__(), 'Operator()')
op = qt.Operator('macd, dma, trix, random, avg_low')
self.assertEqual(op.__repr__(), 'Operator(macd, dma, trix, random, avg_low)')
self.assertEqual(op['dma'].__repr__(), 'Q-TIMING(DMA)')
self.assertEqual(op['macd'].__repr__(), 'R-TIMING(MACD)')
self.assertEqual(op['trix'].__repr__(), 'R-TIMING(TRIX)')
self.assertEqual(op['random'].__repr__(), 'SELECT(RANDOM)')
self.assertEqual(op['avg_low'].__repr__(), 'FACTOR(AVG LOW)')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
self.op.info()
def test_get_strategy_by_id(self):
""" test get_strategy_by_id()"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op.get_strategy_by_id('macd'), op.strategies[0])
self.assertIs(op.get_strategy_by_id(1), op.strategies[1])
self.assertIs(op.get_strategy_by_id('trix'), op.strategies[2])
def test_get_items(self):
""" test method __getitem__(), it should be the same as geting strategies by id"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op['macd'], op.strategies[0])
self.assertIs(op['trix'], op.strategies[2])
self.assertIs(op[1], op.strategies[1])
self.assertIs(op[3], op.strategies[2])
def test_get_strategies_by_price_type(self):
""" test get_strategies_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategies_by_price_type('close')
stg_open = op.get_strategies_by_price_type('open')
stg_high = op.get_strategies_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, [op.strategies[1]])
self.assertEqual(stg_open, [op.strategies[0], op.strategies[2]])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategies_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_count_by_price_type(self):
""" test get_strategy_count_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_count_by_price_type('close')
stg_open = op.get_strategy_count_by_price_type('open')
stg_high = op.get_strategy_count_by_price_type('high')
self.assertIsInstance(stg_close, int)
self.assertIsInstance(stg_open, int)
self.assertIsInstance(stg_high, int)
self.assertEqual(stg_close, 1)
self.assertEqual(stg_open, 2)
self.assertEqual(stg_high, 0)
stg_wrong = op.get_strategy_count_by_price_type(123)
self.assertIsInstance(stg_wrong, int)
self.assertEqual(stg_wrong, 0)
def test_get_strategy_names_by_price_type(self):
""" test get_strategy_names_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_names_by_price_type('close')
stg_open = op.get_strategy_names_by_price_type('open')
stg_high = op.get_strategy_names_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['DMA'])
self.assertEqual(stg_open, ['MACD', 'TRIX'])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategy_names_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_id_by_price_type(self):
""" test get_strategy_IDs_by_price_type"""
print('-----Test get strategy IDs by price type------\n')
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['dma'])
self.assertEqual(stg_open, ['macd', 'trix'])
self.assertEqual(stg_high, [])
op.add_strategies('dma, macd')
op.set_parameter('dma_1', price_type='open')
op.set_parameter('macd', price_type='open')
op.set_parameter('macd_1', price_type='high')
op.set_parameter('trix', price_type='close')
print(f'Operator strategy id:\n'
f'{op.strategies} on memory pos:\n'
f'{[id(stg) for stg in op.strategies]}')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
stg_all = op.get_strategy_id_by_price_type()
print(f'All IDs of strategies:\n'
f'{stg_all}\n'
f'All price types of strategies:\n'
f'{[stg.price_type for stg in op.strategies]}')
self.assertEqual(stg_close, ['dma', 'trix'])
self.assertEqual(stg_open, ['macd', 'dma_1'])
self.assertEqual(stg_high, ['macd_1'])
stg_wrong = op.get_strategy_id_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_property_strategies(self):
""" test property strategies"""
print(f'created a new simple Operator with only one strategy: DMA')
op = qt.Operator('dma')
strategies = op.strategies
self.assertIsInstance(strategies, list)
op.info()
print(f'created the second simple Operator with three strategies')
self.assertIsInstance(strategies[0], TimingDMA)
op = qt.Operator('dma, macd, cdl')
strategies = op.strategies
op.info()
self.assertIsInstance(strategies, list)
self.assertIsInstance(strategies[0], TimingDMA)
self.assertIsInstance(strategies[1], TimingMACD)
self.assertIsInstance(strategies[2], TimingCDL)
def test_property_strategy_count(self):
""" test Property strategy_count, and the method get_strategy_count_by_price_type()"""
self.assertEqual(self.op.strategy_count, 1)
self.assertEqual(self.op2.strategy_count, 3)
self.assertEqual(self.op.get_strategy_count_by_price_type(), 1)
self.assertEqual(self.op2.get_strategy_count_by_price_type(), 3)
self.assertEqual(self.op.get_strategy_count_by_price_type('close'), 1)
self.assertEqual(self.op.get_strategy_count_by_price_type('high'), 0)
self.assertEqual(self.op2.get_strategy_count_by_price_type('close'), 3)
self.assertEqual(self.op2.get_strategy_count_by_price_type('open'), 0)
def test_property_strategy_names(self):
""" test property strategy_ids"""
op = qt.Operator('dma')
self.assertIsInstance(op.strategy_ids, list)
names = op.strategy_ids[0]
print(f'names are {names}')
self.assertEqual(names, 'dma')
op = qt.Operator('dma, macd, trix, cdl')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'cdl')
op = qt.Operator('dma, macd, trix, dma, dma')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'dma_1')
self.assertEqual(op.strategy_ids[4], 'dma_2')
def test_property_strategy_blenders(self):
""" test property strategy blenders including property setter,
and test the method get_blender()"""
print(f'------- Test property strategy blenders ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
# test adding blender to empty operator
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op.add_strategy('dma')
op.strategy_blenders = '1+2'
self.assertEqual(op.strategy_blenders, {'close': ['+', '2', '1']})
op.clear_strategies()
self.assertEqual(op.strategy_blenders, {})
op.add_strategies('dma, trix, macd, dma')
op.set_parameter('dma', price_type='open')
op.set_parameter('trix', price_type='high')
op.set_blender('open', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
op.set_blender('open', '1+2+3')
op.set_blender('abc', '1+2+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
blender_abc = op.get_blender('abc')
self.assertEqual(op.strategy_blenders, {'open': ['+', '3', '+', '2', '1']})
self.assertEqual(blender_open, ['+', '3', '+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
self.assertEqual(blender_abc, None)
op.set_blender('open', 123)
blender_open = op.get_blender('open')
self.assertEqual(blender_open, [])
op.set_blender(None, '1+1')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
self.assertEqual(op.get_blender(), {'close': ['+', '1', '1'],
'open': ['+', '1', '1'],
'high': ['+', '1', '1']})
self.assertEqual(blender_open, ['+', '1', '1'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '1', '1'])
op.set_blender(None, ['1+1', '3+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '3'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '4', '3'])
self.assertEqual(op.view_blender('open'), '3+4')
self.assertEqual(op.view_blender('close'), '1+1')
self.assertEqual(op.view_blender('high'), '3+4')
op.strategy_blenders = (['1+2', '2*3', '1+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
self.assertEqual(op.view_blender('open'), '1+4')
self.assertEqual(op.view_blender('close'), '1+2')
self.assertEqual(op.view_blender('high'), '2*3')
# test error inputs:
# wrong type of price_type
self.assertRaises(TypeError, op.set_blender, 1, '1+3')
# price_type not found, no change is made
op.set_blender('volume', '1+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# price_type not valid, no change is made
op.set_blender('closee', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('open', 55)
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('close', ['1+2'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, ['*', '3', '2'])
# can't parse blender, set to empty list
op.set_blender('high', 'a+bc')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, [])
def test_property_singal_type(self):
""" test property signal_type"""
op = qt.Operator()
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'pt')
op = qt.Operator(signal_type='ps')
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='PS')
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='proportion signal')
self.assertEqual(op.signal_type, 'ps')
print(f'"pt" will be the default type if wrong value is given')
op = qt.Operator(signal_type='wrong value')
self.assertEqual(op.signal_type, 'pt')
print(f'test signal_type.setter')
op.signal_type = 'ps'
self.assertEqual(op.signal_type, 'ps')
print(f'test error raising')
self.assertRaises(TypeError, setattr, op, 'signal_type', 123)
self.assertRaises(ValueError, setattr, op, 'signal_type', 'wrong value')
def test_property_op_data_types(self):
""" test property op_data_types"""
op = qt.Operator()
self.assertIsInstance(op.op_data_types, list)
self.assertEqual(op.op_data_types, [])
op = qt.Operator('macd, dma, trix')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
op = qt.Operator('macd, cdl')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
op.add_strategy('dma')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
def test_property_op_data_type_count(self):
""" test property op_data_type_count"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_count, int)
self.assertEqual(op.op_data_type_count, 0)
op = qt.Operator('macd, dma, trix')
dtn = op.op_data_type_count
self.assertEqual(dtn, 1)
op = qt.Operator('macd, cdl')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
op.add_strategy('dma')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
def test_property_op_data_freq(self):
""" test property op_data_freq"""
op = qt.Operator()
self.assertIsInstance(op.op_data_freq, str)
self.assertEqual(len(op.op_data_freq), 0)
self.assertEqual(op.op_data_freq, '')
op = qt.Operator('macd, dma, trix')
dtf = op.op_data_freq
self.assertIsInstance(dtf, str)
self.assertEqual(dtf[0], 'd')
op.set_parameter('macd', data_freq='m')
dtf = op.op_data_freq
self.assertIsInstance(dtf, list)
self.assertEqual(len(dtf), 2)
self.assertEqual(dtf[0], 'd')
self.assertEqual(dtf[1], 'm')
def test_property_bt_price_types(self):
""" test property bt_price_types"""
print('------test property bt_price_types-------')
op = qt.Operator()
self.assertIsInstance(op.bt_price_types, list)
self.assertEqual(len(op.bt_price_types), 0)
self.assertEqual(op.bt_price_types, [])
op = qt.Operator('macd, dma, trix')
btp = op.bt_price_types
self.assertIsInstance(btp, list)
self.assertEqual(btp[0], 'close')
op.set_parameter('macd', price_type='open')
btp = op.bt_price_types
btpc = op.bt_price_type_count
print(f'price_types are \n{btp}')
self.assertIsInstance(btp, list)
self.assertEqual(len(btp), 2)
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.add_strategies(['dma', 'macd'])
op.set_parameter('dma_1', price_type='high')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'high')
self.assertEqual(btp[2], 'open')
self.assertEqual(btpc, 3)
op.remove_strategy('dma_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.remove_strategy('macd_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
def test_property_op_data_type_list(self):
""" test property op_data_type_list"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_list, list)
self.assertEqual(len(op.op_data_type_list), 0)
self.assertEqual(op.op_data_type_list, [])
op = qt.Operator('macd, dma, trix, cdl')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(ohd[0], ['close'])
op.set_parameter('macd', data_types='open, close')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(len(ohd), 4)
self.assertEqual(ohd[0], ['open', 'close'])
self.assertEqual(ohd[1], ['close'])
self.assertEqual(ohd[2], ['close'])
self.assertEqual(ohd[3], ['open', 'high', 'low', 'close'])
def test_property_op_history_data(self):
""" Test this important function to get operation history data that shall be used in
signal generation
these data are stored in list of nd-arrays, each ndarray represents the data
that is needed for each and every strategy
"""
print(f'------- Test getting operation history data ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.op_history_data, {})
self.assertEqual(op.signal_type, 'pt')
def test_property_opt_space_par(self):
""" test property opt_space_par"""
print(f'-----test property opt_space_par--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_space_par, tuple)
self.assertIsInstance(op.opt_space_par[0], list)
self.assertIsInstance(op.opt_space_par[1], list)
self.assertEqual(len(op.opt_space_par), 2)
self.assertEqual(op.opt_space_par, ([], []))
op = qt.Operator('macd, dma, trix, cdl')
osp = op.opt_space_par
print(f'before setting opt_tags opt_space_par is empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(osp[0], [])
self.assertEqual(osp[1], [])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
osp = op.opt_space_par
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(len(osp), 2)
self.assertIsInstance(osp[0], list)
self.assertIsInstance(osp[1], list)
self.assertEqual(len(osp[0]), 6)
self.assertEqual(len(osp[1]), 6)
self.assertEqual(osp[0], [(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
self.assertEqual(osp[1], ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
def test_property_opt_types(self):
""" test property opt_tags"""
print(f'-----test property opt_tags--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_tags, list)
self.assertEqual(len(op.opt_tags), 0)
self.assertEqual(op.opt_tags, [])
op = qt.Operator('macd, dma, trix, cdl')
otp = op.opt_tags
print(f'before setting opt_tag, all opt_tags are 0:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(otp, [0, 0, 0, 0])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
otp = op.opt_tags
print(f'after setting opt_tag on two strategies, opt_tags reflects it:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(len(otp), 4)
self.assertEqual(otp, [1, 1, 0, 0])
def test_property_max_window_length(self):
""" test property max_window_length"""
print(f'-----test property max window length--------:\n')
op = qt.Operator()
self.assertIsInstance(op.max_window_length, int)
self.assertEqual(op.max_window_length, 0)
op = qt.Operator('macd, dma, trix, cdl')
mwl = op.max_window_length
print(f'before setting window_length the value is 270:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 270)
op.set_parameter('macd', window_length=300)
op.set_parameter('dma', window_length=350)
mwl = op.max_window_length
print(f'after setting window_length the value is new set value:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 350)
def test_property_bt_price_type_count(self):
""" test property bt_price_type_count"""
print(f'-----test property bt_price_type_count--------:\n')
op = qt.Operator()
self.assertIsInstance(op.bt_price_type_count, int)
self.assertEqual(op.bt_price_type_count, 0)
op = qt.Operator('macd, dma, trix, cdl')
otp = op.bt_price_type_count
print(f'before setting price_type the price count is 1:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 1)
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='open')
otp = op.bt_price_type_count
print(f'after setting price_type the price type count is 2:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 2)
def test_property_set(self):
""" test all property setters:
setting following properties:
- strategy_blenders
- signal_type
other properties can not be set"""
print(f'------- Test setting properties ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator('macd, dma, trix, cdl')
# TODO: modify set_parameter() so that the usage below becomes valid
# a_to_sell.set_parameter('dma, cdl', price_type='open')
op.set_parameter('dma', price_type='open')
op.set_parameter('cdl', price_type='open')
sb = op.strategy_blenders
st = op.signal_type
self.assertIsInstance(sb, dict)
print(f'before setting: strategy_blenders={sb}')
self.assertEqual(sb, {})
op.strategy_blenders = '1+2 * 3'
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '*', '3', '2', '1'],
'open': ['+', '*', '3', '2', '1']})
op.strategy_blenders = ['1+2', '3-4']
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '2', '1'],
'open': ['-', '4', '3']})
def test_operator_ready(self):
"""test the method ready of Operator"""
op = qt.Operator()
print(f'operator is ready? "{op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[1], qt.SelectingAll)
self.assertIsInstance(op.strategies[2], qt.RiconUrgent)
self.assertIsInstance(op[0], qt.TimingDMA)
self.assertIsInstance(op[1], qt.SelectingAll)
self.assertIsInstance(op[2], qt.RiconUrgent)
self.assertIsInstance(op['dma'], qt.TimingDMA)
self.assertIsInstance(op['all'], qt.SelectingAll)
self.assertIsInstance(op['urgent'], qt.RiconUrgent)
self.assertEqual(op.strategy_count, 3)
print(f'test adding strategies into existing op')
print('test adding strategy by string')
op.add_strategy('macd')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingMACD)
self.assertEqual(op.strategy_count, 4)
op.add_strategy('random')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.SelectingRandom)
self.assertEqual(op.strategy_count, 5)
test_ls = TestLSStrategy()
op.add_strategy(test_ls)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], TestLSStrategy)
self.assertEqual(op.strategy_count, 6)
print(f'Test different instance of objects are added to operator')
op.add_strategy('dma')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingDMA)
self.assertIsNot(op.strategies[0], op.strategies[6])
def test_operator_add_strategies(self):
""" etst adding multiple strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertEqual(op.strategy_count, 3)
print('test adding multiple strategies -- adding strategy by list of strings')
op.add_strategies(['dma', 'macd'])
self.assertEqual(op.strategy_count, 5)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by comma separated strings')
op.add_strategies('dma, macd')
self.assertEqual(op.strategy_count, 7)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategies')
op.add_strategies([qt.TimingDMA(), qt.TimingMACD()])
self.assertEqual(op.strategy_count, 9)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[7], qt.TimingDMA)
self.assertIsInstance(op.strategies[8], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategy and str')
op.add_strategies(['DMA', qt.TimingMACD()])
self.assertEqual(op.strategy_count, 11)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[9], qt.TimingDMA)
self.assertIsInstance(op.strategies[10], qt.TimingMACD)
self.assertIsNot(op.strategies[0], op.strategies[9])
self.assertIs(type(op.strategies[0]), type(op.strategies[9]))
print('test adding fault data')
self.assertRaises(AssertionError, op.add_strategies, 123)
self.assertRaises(AssertionError, op.add_strategies, None)
def test_opeartor_remove_strategy(self):
""" test method remove strategy"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.remove_strategy('dma')
self.assertEqual(op.strategy_count, 6)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'dma_1', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['dma_1'])
self.assertEqual(op.strategies[3], op['macd'])
self.assertEqual(op.strategies[4], op['dma_2'])
self.assertEqual(op.strategies[5], op['custom'])
op.remove_strategy('dma_1')
self.assertEqual(op.strategy_count, 5)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['macd'])
self.assertEqual(op.strategies[3], op['dma_2'])
self.assertEqual(op.strategies[4], op['custom'])
def test_opeartor_clear_strategies(self):
""" test operator clear strategies"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op.add_strategy('dma', pars=(12, 123, 25))
self.assertEqual(op.strategy_count, 1)
self.assertEqual(op.strategy_ids, ['dma'])
self.assertEqual(type(op.strategies[0]), TimingDMA)
self.assertEqual(op.strategies[0].pars, (12, 123, 25))
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
def test_operator_prepare_data(self):
"""test processes that related to prepare data"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(strategies=[test_ls, test_sel, test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
# calling prepare_data before all strategy parameters are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='custom',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.assertEqual(self.op.strategies[0].pars, {'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='custom_1',
pars=())
self.assertEqual(self.op.strategies[1].pars, ()),
self.op.set_parameter(stg_id='custom_2',
pars=(0.2, 0.02, -0.02))
self.assertEqual(self.op.strategies[2].pars, (0.2, 0.02, -0.02)),
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._op_history_data, dict)
self.assertEqual(len(self.op._op_history_data), 3)
# test if automatic strategy blenders are set
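# here the automatically generated blender is simply the sum of all three strategies, i.e. '0+1+2' in RPN form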
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '2', '+', '1', '0']})
tim_hist_data = self.op._op_history_data['custom']
sel_hist_data = self.op._op_history_data['custom_1']
ric_hist_data = self.op._op_history_data['custom_2']
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises ValueError if an empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
""" Test signal generation process of operator objects
:return:
"""
# build trading strategies from the custom strategy classes defined in this test module
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sel2 = TestSelStrategyDiffTime()
test_sig = TestSigStrategy()
print('--Test PT type signal generation--')
# test PT-type signal generation:
# create an Operator object whose signal type is PT (proportion target signals)
# the Operator holds two strategies, an LS-Strategy and a Sel-Strategy, representing timing and stock selection
# each strategy generates its own PT signal and the two are blended into a single output
self.op = qt.Operator(strategies=[test_ls, test_sel])
self.op.set_parameter(stg_id='custom',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id=1,
pars=())
# self.a_to_sell.set_blender(blender='0+1+2')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test operator information in normal mode--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '1', '0']})
self.op.set_blender(None, '0*1')
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
backtest_price_types = op_list.htypes
self.assertEqual(backtest_price_types[0], 'close')
self.assertEqual(op_list.shape, (3, 45, 1))
reduced_op_list = op_list.values.squeeze().T
print(f'op_list created; it is a 3-share/45-day/1-htype array. To make the comparison possible, \n'
f'it will be squeezed to a 2-d array and compared share-wise:\n'
f'{reduced_op_list}')
target_op_values = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
self.assertTrue(np.allclose(target_op_values, reduced_op_list, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# test generating two sets of PT-type signals:
# add two more strategies to the Operator, of the same types but with different parameters, with backtest price type "open"
# the Operator should then produce two sets of trade signals, one for the "close" price type and one for "open"
# two brand-new strategy objects must be created here; reusing the old ones would put duplicate object references into op.strategies and cause errors
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
self.op.add_strategies([test_ls, test_sel])
self.op.set_parameter(stg_id='custom_2',
price_type='open')
self.op.set_parameter(stg_id='custom_3',
price_type='open')
self.assertEqual(self.op['custom'].price_type, 'close')
self.assertEqual(self.op['custom_2'].price_type, 'open')
self.op.set_parameter(stg_id='custom_2',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id='custom_3',
pars=())
self.op.set_blender(blender='0 or 1', price_type='open')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test how operator information is printed out--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0'],
'open': ['or', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
signal_close = op_list['close'].squeeze().T
signal_open = op_list['open'].squeeze().T
self.assertEqual(signal_close.shape, (45, 3))
self.assertEqual(signal_open.shape, (45, 3))
target_op_close = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
target_op_open = np.array([[0.5, 0.5, 1.0],
[0.5, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 0.5, 0.0],
[1.0, 0.5, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.5, 1.0, 0.0],
[0.5, 1.0, 0.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0]])
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_close), list(signal_close))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_close, signal_close, equal_nan=True))
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_open), list(signal_open))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_open, signal_open, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# more test cases to be added here
def test_stg_parameter_setting(self):
""" test setting parameters of strategies
test the method set_parameters
:return:
"""
op = qt.Operator(strategies='dma, all, urgent')
print(op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{op.strategies[0].info()}')
# TODO: allow set_parameters to a list of strategies or str-listed strategies
# TODO: allow set_parameters to all strategies of specific bt price type
print(f'Set up strategy parameters by strategy id')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
op.set_parameter('all',
window_length=20)
op.set_parameter('all', price_type='high')
print(f'Can also set up strategy parameters by strategy index')
op.set_parameter(2, price_type='open')
op.set_parameter(2,
opt_tag=1,
pars=(9, -0.09),
window_length=10)
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(op.strategies[2].pars, (9, -0.09))
self.assertEqual(op.op_data_freq, 'd')
self.assertEqual(op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.max_window_length, 20)
print(f'KeyError will be raised if wrong strategy id is given')
self.assertRaises(KeyError, op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(KeyError, op.set_parameter, stg_id='wrong_input', pars=(1, 2))
print(f'ValueError will be raised if invalid parameter values are given')
self.assertRaises(ValueError, op.set_parameter, stg_id=0, pars=('wrong input', 'wrong input'))
# test blenders of different price types
# test setting blenders to different price types
# TODO: to allow operands like "and", "or", "not", "xor"
# a_to_sell.set_blender('close', '0 and 1 or 2')
# self.assertEqual(a_to_sell.get_blender('close'), 'str-1.2')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
op.set_blender('open', '0 & 1 | 2')
self.assertEqual(op.get_blender('open'), ['|', '2', '&', '1', '0'])
op.set_blender('high', '(0|1) & 2')
self.assertEqual(op.get_blender('high'), ['&', '2', '|', '1', '0'])
op.set_blender('close', '0 & 1 | 2')
self.assertEqual(op.get_blender(), {'close': ['|', '2', '&', '1', '0'],
'high': ['&', '2', '|', '1', '0'],
'open': ['|', '2', '&', '1', '0']})
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.opt_tags, [1, 0, 1])
def test_signal_blend(self):
self.assertEqual(blender_parser('0 & 1'), ['&', '1', '0'])
self.assertEqual(blender_parser('0 or 1'), ['or', '1', '0'])
self.assertEqual(blender_parser('0 & 1 | 2'), ['|', '2', '&', '1', '0'])
blender = blender_parser('0 & 1 | 2')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 1)
self.assertEqual(signal_blend([0, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '0 & ( 1 | 2 )'
self.assertEqual(blender_parser('0 & ( 1 | 2 )'), ['&', '|', '2', '1', '0'])
blender = blender_parser('0 & ( 1 | 2 )')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 0)
self.assertEqual(signal_blend([0, 0, 1], blender), 0)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '(1-2)/3 + 0'
self.assertEqual(blender_parser('(1-2)/3 + 0'), ['+', '0', '/', '3', '-', '2', '1'])
blender = blender_parser('(1-2)/3 + 0')
self.assertEqual(signal_blend([5, 9, 1, 4], blender), 7)
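# Illustrative sketch: as the two assertions above show, the blender list is RPN stored with the
# last operator first and numeric tokens indexing into the signal list, so a plain stack walk
# reproduces signal_blend for the arithmetic operators:
rpn_stack = []
for tk in reversed(blender_parser('(1-2)/3 + 0')):       # -> 1 2 - 3 / 0 + in usual RPN order
    if tk in ('+', '-', '*', '/'):
        b, a = rpn_stack.pop(), rpn_stack.pop()
        rpn_stack.append({'+': a + b, '-': a - b, '*': a * b, '/': a / b}[tk])
    else:
        rpn_stack.append([5, 9, 1, 4][int(tk)])           # same signals as the assertion above
self.assertEqual(rpn_stack.pop(), 7)                      # (9 - 1) / 4 + 5 == 7, matching signal_blend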
# pars: '(0*1/2*(3+4))+5*(6+7)-8'
self.assertEqual(blender_parser('(0*1/2*(3+4))+5*(6+7)-8'), ['-', '8', '+', '*', '+', '7', '6', '5', '*',
'+', '4', '3', '/', '2', '*', '1', '0'])
blender = blender_parser('(0*1/2*(3+4))+5*(6+7)-8')
self.assertEqual(signal_blend([1, 1, 1, 1, 1, 1, 1, 1, 1], blender), 3)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 14)
# parse: '0/max(2,1,3 + 5)+4'
self.assertEqual(blender_parser('0/max(2,1,3 + 5)+4'), ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
self.assertEqual(signal_blend([8.0, 4, 3, 5.0, 0.125, 5], blender), 0.925)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 5.25)
print('speed test')
import time
st = time.time()
blender = blender_parser('0+max(1,2,(3+4)*5, max(6, (7+8)*9), 10-11) * (12+13)')
res = []
for i in range(10000):
res = signal_blend([1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 7, 8, 2, 3], blender)
et = time.time()
print(f'total time for RPN processing: {et - st}, got result: {res}')
blender = blender_parser("0 + 1 * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 7)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0+1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
# TODO: expressions with a leading unary minus such as -(1+2) cannot be handled yet
# self.a_to_sell.set_blender('selecting', "-(0 + 1) * 2")
# self.assertEqual(self.a_to_sell.signal_blend([1, 2, 3]), -9)
blender = blender_parser("(0-1)/2 + 3")
print(f'RPN of notation: "(0-1)/2 + 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 2, 3, 0.0], blender), -0.33333333)
blender = blender_parser("0 + 1 / 2")
print(f'RPN of notation: "0 + 1 / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, math.pi, 4], blender), 1.78539816)
blender = blender_parser("(0 + 1) / 2")
print(f'RPN of notation: "(0 + 1) / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 2, 3], blender), 1)
blender = blender_parser("(0 + 1 * 2) / 3")
print(f'RPN of notation: "(0 + 1 * 2) / 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([3, math.e, 10, 10], blender), 3.0182818284590454)
blender = blender_parser("0 / 1 * 2")
print(f'RPN of notation: "0 / 1 * 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 3, 6], blender), 2)
blender = blender_parser("(0 - 1 + 2) * 4")
print(f'RPN of notation: "(0 - 1 + 2) * 4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 1, -1, np.nan, math.pi], blender), -3.141592653589793)
blender = blender_parser("0 * 1")
print(f'RPN of notation: "0 * 1" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([math.pi, math.e], blender), 8.539734222673566)
blender = blender_parser('abs(3-sqrt(2) / cos(1))')
print(f'RPN of notation: "abs(3-sqrt(2) / cos(1))" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['abs(1)', '-', '/', 'cos(1)', '1', 'sqrt(1)', '2', '3'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
print(f'RPN of notation: "0/max(2,1,3 + 5)+4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('1 + sum(1,2,3+3, sum(1, 2) + 3) *5')
print(f'RPN of notation: "1 + sum(1,2,3+3, sum(1, 2) + 3) *5" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '5', 'sum(4)', '+', '3', 'sum(2)', '2', '1',
'+', '3', '3', '2', '1', '1'])
blender = blender_parser('1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)')
print(f'RPN of notation: "1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '+', '3', '2', 'sum(5)', '-', '8', '7',
'sum(2)', '*', '6', '+', '5', '4', '3', '*', '4',
'+', '5', '3', '2', '1', '1'])
# TODO: ndarray type of signals to be tested:
def test_set_opt_par(self):
""" test setting opt pars in batch"""
print(f'--------- Testing setting Opt Pars: set_opt_par -------')
op = qt.Operator('dma, random, crossline')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
self.assertEqual(op.opt_tags, [1, 0, 0])
op.set_opt_par((5, 12, 9))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 1])
op.set_opt_par((5, 12, 9, 8, 26, 9, 'buy'))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
op.set_opt_par((9, 200, 155, 8, 26, 9, 'buy', 5, 12, 9))
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
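# with opt_tag=2 the strategy's whole parameter tuple counts as a single element of the opt-par
# sequence, so the nested tuple (8, 26, 9, 'buy') below is passed through to the strategy unchanged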
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=2,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 2])
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (5, 10, 5, 'sell'))
op.set_opt_par((5, 12, 9, (8, 26, 9, 'buy')))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# Test Errors
# Not enough values for parameter
op.set_parameter('crossline', opt_tag=1)
self.assertRaises(ValueError, op.set_opt_par, (5, 12, 9, 8))
# wrong type of input
self.assertRaises(AssertionError, op.set_opt_par, [5, 12, 9, 7, 15, 12, 'sell'])
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'R-TIMING'
self.stg_name = "CROSSLINE"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
        # TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask, which may have unintended consequences
        # TODO: the handling of nan values still needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
        # test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
        # test single factor, get max factor in proportion weight
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
        # test single factor, get max factor in even weight, threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
def test_tokenizer(self):
self.assertListEqual(_exp_to_token('1+1'),
['1', '+', '1'])
print(_exp_to_token('1+1'))
self.assertListEqual(_exp_to_token('1 & 1'),
['1', '&', '1'])
print(_exp_to_token('1&1'))
self.assertListEqual(_exp_to_token('1 and 1'),
['1', 'and', '1'])
print(_exp_to_token('1 and 1'))
self.assertListEqual(_exp_to_token('1 or 1'),
['1', 'or', '1'])
print(_exp_to_token('1 or 1'))
self.assertListEqual(_exp_to_token('(1 - 1 + -1) * pi'),
['(', '1', '-', '1', '+', '-1', ')', '*', 'pi'])
print(_exp_to_token('(1 - 1 + -1) * pi'))
self.assertListEqual(_exp_to_token('abs(5-sqrt(2) / cos(pi))'),
['abs(', '5', '-', 'sqrt(', '2', ')', '/', 'cos(', 'pi', ')', ')'])
print(_exp_to_token('abs(5-sqrt(2) / cos(pi))'))
self.assertListEqual(_exp_to_token('sin(pi) + 2.14'),
['sin(', 'pi', ')', '+', '2.14'])
print(_exp_to_token('sin(pi) + 2.14'))
self.assertListEqual(_exp_to_token('(1-2)/3.0 + 0.0000'),
['(', '1', '-', '2', ')', '/', '3.0', '+', '0.0000'])
print(_exp_to_token('(1-2)/3.0 + 0.0000'))
self.assertListEqual(_exp_to_token('-(1. + .2) * max(1, 3, 5)'),
['-', '(', '1.', '+', '.2', ')', '*', 'max(', '1', ',', '3', ',', '5', ')'])
print(_exp_to_token('-(1. + .2) * max(1, 3, 5)'))
self.assertListEqual(_exp_to_token('(x + e * 10) / 10'),
['(', 'x', '+', 'e', '*', '10', ')', '/', '10'])
print(_exp_to_token('(x + e * 10) / 10'))
self.assertListEqual(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'),
['8.2', '/', '(', '(', '-.1', '+', 'abs3(', '3', ',', '4', ',', '5', ')', ')', '*', '0.12',
')'])
print(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'))
self.assertListEqual(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'),
['8.2', '/', 'abs3(', '3', ',', '4', ',', '25.34', '+', '5', ')', '*', '0.12'])
print(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
                    Timestamp('2020-01-08 00:00:00', freq='D'): 7,
# ActivitySim
# See full license in LICENSE.txt.
import os.path
import numpy.testing as npt
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from .. import inject
from .. import simulate
@pytest.fixture(scope='module')
def data_dir():
return os.path.join(os.path.dirname(__file__), 'data')
@pytest.fixture(scope='module')
def spec_name(data_dir):
return 'sample_spec.csv'
@pytest.fixture(scope='module')
def spec(data_dir, spec_name):
return simulate.read_model_spec(file_name=spec_name)
@pytest.fixture(scope='module')
def data(data_dir):
return pd.read_csv(os.path.join(data_dir, 'data.csv'))
def setup_function():
configs_dir = os.path.join(os.path.dirname(__file__), 'configs')
inject.add_injectable("configs_dir", configs_dir)
    output_dir = os.path.join(os.path.dirname(__file__), 'output')
inject.add_injectable("output_dir", output_dir)
def test_read_model_spec(spec_name):
spec = simulate.read_model_spec(file_name=spec_name)
assert len(spec) == 4
assert spec.index.name == 'Expression'
assert list(spec.columns) == ['alt0', 'alt1']
npt.assert_array_equal(
spec.values,
[[1.1, 11], [2.2, 22], [3.3, 33], [4.4, 44]])
def test_eval_variables(spec, data):
result = simulate.eval_variables(spec.index, data)
expected = pd.DataFrame([
[1, 0, 4, 1],
[0, 1, 4, 1],
[0, 1, 5, 1]],
index=data.index, columns=spec.index)
expected[expected.columns[0]] = expected[expected.columns[0]].astype(np.int8)
expected[expected.columns[1]] = expected[expected.columns[1]].astype(np.int8)
expected[expected.columns[2]] = expected[expected.columns[2]].astype(np.int64)
expected[expected.columns[3]] = expected[expected.columns[3]].astype(int)
print("\nexpected\n%s" % expected.dtypes)
print("\nresult\n%s" % result.dtypes)
pdt.assert_frame_equal(result, expected, check_names=False)
def test_simple_simulate(data, spec):
inject.add_injectable("settings", {'check_for_variability': False})
choices = simulate.simple_simulate(choosers=data, spec=spec, nest_spec=None)
    expected = pd.Series([1, 1, 1], index=data.index)
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os.path
import pkg_resources
import biom
import q2templates
import numpy as np
import pandas as pd
import seaborn as sns
TEMPLATES = pkg_resources.resource_filename(
'q2_feature_table._core_features', 'core_features_assets')
def core_features(output_dir, table: biom.Table, min_fraction: float = 0.5,
max_fraction: float = 1.0, steps: int = 11) -> None:
if max_fraction < min_fraction:
raise ValueError('min_fraction (%r) parameter must be less than '
'max_fraction (%r) parameter.' %
(min_fraction, max_fraction))
index_fp = os.path.join(TEMPLATES, 'index.html')
context = {
'num_samples': table.shape[1],
'num_features': table.shape[0]
}
if min_fraction == max_fraction:
fractions = [min_fraction]
else:
fractions = np.linspace(min_fraction, max_fraction, steps)
rounded_fractions = _round_fractions(fractions)
data = []
file_links = []
for fraction, rounded_fraction in zip(fractions, rounded_fractions):
core_features = _get_core_features(table, fraction)
core_feature_count = len(core_features)
data.append([fraction, core_feature_count])
if core_feature_count > 0:
core_feature_fn = 'core-features-%s.tsv' % rounded_fraction
core_feature_fp = os.path.join(output_dir, core_feature_fn)
file_links.append("<a href='./%s'>TSV</a>" % core_feature_fn)
core_features.to_csv(core_feature_fp, sep='\t',
index_label='Feature ID')
else:
file_links.append('No core features')
df = pd.DataFrame(data, columns=['Fraction of samples', 'Feature count'])
df['Fraction of features'] = df['Feature count'] / table.shape[0]
df['Feature list'] = file_links
# newer versions of seaborn don't like dataframes with fewer than two rows
if len(fractions) > 1:
ax = sns.regplot(data=df, x='Fraction of samples', y='Feature count',
fit_reg=False)
# matplotlib will issue a UserWarning if attempting to set left and
# right bounds to the same value.
ax.set_xbound(min(fractions), max(fractions))
ax.set_ybound(0, max(df['Feature count']) + 1)
ax.get_figure().savefig(
os.path.join(output_dir, 'core-feature-counts.svg'))
context['show_plot'] = True
context['table_html'] = q2templates.df_to_html(df, index=False,
escape=False)
q2templates.render(index_fp, output_dir, context=context)
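def _example_core_features_call():
    """Hedged usage sketch (added for illustration; not part of the original module).

    The toy table and output directory below are assumptions made only to show
    a plausible call; q2templates assets and an existing output directory are
    required for the call to actually render its report.
    """
    toy_table = biom.Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3'])
    # Writes core-feature TSVs, a count plot, and an index.html into 'toy-output'.
    core_features('toy-output', toy_table, min_fraction=0.5, max_fraction=1.0, steps=3)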
def _get_core_features(table, fraction):
filter_f = _get_filter_to_core_f(table, fraction)
feature_filtered_table = table.filter(
filter_f, axis='observation', inplace=False)
index = []
data = []
for values, id_, _ in feature_filtered_table.iter(axis='observation'):
index.append(id_)
data.append(_seven_number_summary(values))
if len(data) > 0:
        return pd.DataFrame(data, index=index)
import numpy as np
import pandas as pd
import math
import streamlit as st
class Oversampler:
def __init__(self, X, y, Z, target_col, bias_cols, bias_col_types):
self.X = X
self.y = y
self.Z = Z
self.target_col = target_col
self.bias_cols = bias_cols
self.bias_col_types = bias_col_types
self.n_Z = [[0,0],[0,0]] # number of indices corresponding to each group in Z
self.i_t_Z = [[None, None], [None, None]] # indicies corresponding to Z with positive target label
self.n_t_Z = [[0,0],[0,0]] # number of indices corresponding to Z with positive target
self.f_t_Z = [[0,0],[0,0]] # proportion of points with positive target labels
for i in [0,1]:
for j in [0,1]:
self.n_Z[i][j] = X[ np.logical_and( Z[bias_cols[0]]==i,
Z[bias_cols[1]]==j ) ].shape[0]
self.i_t_Z[i][j] = X.index[np.logical_and(np.logical_and( Z[bias_cols[0]]==i,
Z[bias_cols[1]]==j ),
y==1)].tolist()
self.n_t_Z[i][j] = len(self.i_t_Z[i][j])
self.f_t_Z[i][j] = self.n_t_Z[i][j] / self.n_Z[i][j]
def original_data_stats(self):
st.write('**Some stats on the original data (before oversampling)**')
st.write('Number of data points:', self.X.shape[0])
df = pd.DataFrame(data=self.n_Z,
index=self.bias_col_types[0],
columns=self.bias_col_types[1])
st.write(df)
st.write('Number of data points with', self.target_col)
df = pd.DataFrame(data=self.n_t_Z,
index=self.bias_col_types[0],
columns=self.bias_col_types[1])
st.write(df)
st.write('Proportion for which:', self.target_col)
df = pd.DataFrame(data=self.f_t_Z,
index=self.bias_col_types[0],
columns=self.bias_col_types[1])
st.write(df)
st.write('Bias factors before oversampling:')
df = self.f_t_Z[1][1] / df
st.write(df)
def get_oversampled_data(self, oversample_factor=1):
# Augment the training set by oversampling under-represented classes
X_new = self.X.copy()
y_new = self.y.copy()
Z_new = self.Z.copy()
for i in [0,1]:
for j in [0,1]:
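                # Solve (n_t + x) / (n + x) = f_ref for x, where f_ref is the positive-label
                # rate of the reference group f_t_Z[1][1]: x = (f_ref * n - n_t) / (1 - f_ref).
                # oversample_factor then scales how much of that gap is closed.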
num_new_points = float(oversample_factor)*(self.f_t_Z[1][1]*self.n_Z[i][j] - self.n_t_Z[i][j])/(1.0-self.f_t_Z[1][1])
#st.write(i, j, num_new_points)
if i==0 or j==0:
#frac, integer = math.modf(f_t_Z[1][1] / f_t_Z[i][j])
frac, integer = math.modf(num_new_points / self.n_t_Z[i][j])
n_frac = int(frac*self.n_t_Z[i][j])
#i_frac = np.random.choice(self.i_t_Z[i][j], n_frac)
i_frac = self.i_t_Z[i][j][: n_frac]
for k in range(int(integer)):
X_new = pd.concat([X_new, self.X.loc[self.i_t_Z[i][j]]], ignore_index=True)
                        y_new = pd.concat([y_new, self.y.loc[self.i_t_Z[i][j]]], ignore_index=True)
class linearmixedeffects():
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.regression import mixed_linear_model
import sklearn as sk
from math import sqrt
import statsmodels.api as sm
def __init__(self, response, fixed, random, predict = 0, mlmf = 0):
self.response = response
self.fixed = fixed
self.random = random
def fitmlm(self):
import pandas as pd
from statsmodels.regression import mixed_linear_model
mlm = mixed_linear_model.MixedLM(endog = pd.DataFrame(self.response), exog = pd.DataFrame(self.fixed), groups = pd.DataFrame(self.random), formula = 'response ~ fixed')
mlmf = mlm.fit()
return mlmf
def summarymlm(self):
import pandas as pd
from statsmodels.regression import mixed_linear_model
mlm = mixed_linear_model.MixedLM(endog = pd.DataFrame(self.response), exog = pd.DataFrame(self.fixed), groups = pd.DataFrame(self.random), formula = 'response ~ fixed')
mlmf = mlm.fit()
print(" ")
print("The summary of the linear mixed effects model is given below:")
return mlmf.summary()
def plotmlm(self):
import seaborn as sns; sns.set()
import pandas as pd
from statsmodels.regression import mixed_linear_model
mlm = mixed_linear_model.MixedLM(endog = pd.DataFrame(self.response), exog = pd.DataFrame(self.fixed), groups = pd.DataFrame(self.random), formula = 'response ~ fixed')
mlmf = mlm.fit()
        db_plot = pd.DataFrame()
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
def main():
try:
df_train = pd.read_csv(
'http://archive.ics.uci.edu/ml/'
'machine-learning-databases/adult/adult.data', header=None)
df_test = pd.read_csv(
'http://archive.ics.uci.edu/ml/'
'machine-learning-databases/adult/adult.test',
skiprows=[0], header=None)
except:
df_train = pd.read_csv('adult.data', header=None)
df_test = pd.read_csv('adult.test', skiprows=[0], header=None)
X_train = df_train[df_train != ' ?'].dropna()
X_test = df_test[df_test != ' ?'].dropna()
    y_train = pd.DataFrame(X_train[14])
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
from pandas.tests.window.common import (
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
# binary moments
def test_rolling_cov(series):
A = series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(series):
A = series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
result = getattr(frame.rolling(window=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(method, frame):
series = frame[1]
res = getattr(series.rolling(window=10), method)(frame)
res2 = getattr(frame.rolling(window=10), method)(series)
exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.slow
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
def test_rolling_apply_consistency(
consistency_data, base_functions, no_nan_functions, window, min_periods, center
):
x, is_constant, no_nans = consistency_data
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning
)
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
functions = base_functions
# GH 8269
if no_nans:
functions = no_nan_functions + base_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center, min_periods=min_periods), name
)
if (
require_min_periods
and (min_periods is not None)
and (min_periods < require_min_periods)
):
continue
if name == "count":
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
else:
if name in ["cov", "corr"]:
rolling_f_result = rolling_f(pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
# GH 9422
if name in ["sum", "prod"]:
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = Series(np.zeros(20))
other = Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
except AssertionError:
print(res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
    tm.assert_series_equal(result, expected)
"""
Sklearn dependent models
Decision Tree, Elastic Net, Random Forest, MLPRegressor, KNN, Adaboost
"""
import datetime
import random
import numpy as np
import pandas as pd
from autots.models.base import ModelObject, PredictionObject
from autots.tools.probabilistic import Point_to_Probability
from autots.tools.seasonal import date_part, seasonal_int
from autots.tools.window_functions import window_maker, last_window
def rolling_x_regressor(
df,
mean_rolling_periods: int = 30,
macd_periods: int = None,
std_rolling_periods: int = 7,
max_rolling_periods: int = None,
min_rolling_periods: int = None,
quantile90_rolling_periods: int = None,
quantile10_rolling_periods: int = None,
ewm_alpha: float = 0.5,
ewm_var_alpha: float = None,
additional_lag_periods: int = 7,
abs_energy: bool = False,
rolling_autocorr_periods: int = None,
add_date_part: str = None,
holiday: bool = False,
holiday_country: str = 'US',
polynomial_degree: int = None,
window: int = None,
):
"""
Generate more features from initial time series.
macd_periods ignored if mean_rolling is None.
Returns a dataframe of statistical features. Will need to be shifted by 1 or more to match Y for forecast.
"""
X = df.copy()
if str(mean_rolling_periods).isdigit():
temp = df.rolling(int(mean_rolling_periods), min_periods=1).median()
X = pd.concat([X, temp], axis=1)
if str(macd_periods).isdigit():
temp = df.rolling(int(macd_periods), min_periods=1).median() - temp
X = pd.concat([X, temp], axis=1)
if str(std_rolling_periods).isdigit():
X = pd.concat([X, df.rolling(std_rolling_periods, min_periods=1).std()], axis=1)
if str(max_rolling_periods).isdigit():
X = pd.concat([X, df.rolling(max_rolling_periods, min_periods=1).max()], axis=1)
if str(min_rolling_periods).isdigit():
X = pd.concat([X, df.rolling(min_rolling_periods, min_periods=1).min()], axis=1)
if str(quantile90_rolling_periods).isdigit():
X = pd.concat(
[X, df.rolling(quantile90_rolling_periods, min_periods=1).quantile(0.9)],
axis=1,
)
if str(quantile10_rolling_periods).isdigit():
X = pd.concat(
[X, df.rolling(quantile10_rolling_periods, min_periods=1).quantile(0.1)],
axis=1,
)
if str(ewm_alpha).replace('.', '').isdigit():
X = pd.concat(
[X, df.ewm(alpha=ewm_alpha, ignore_na=True, min_periods=1).mean()], axis=1
)
if str(ewm_var_alpha).replace('.', '').isdigit():
X = pd.concat(
[X, df.ewm(alpha=ewm_var_alpha, ignore_na=True, min_periods=1).var()],
axis=1,
)
if str(additional_lag_periods).isdigit():
X = pd.concat([X, df.shift(additional_lag_periods)], axis=1).fillna(
method='bfill'
)
if abs_energy:
X = pd.concat([X, df.pow(other=([2] * len(df.columns))).cumsum()], axis=1)
if str(rolling_autocorr_periods).isdigit():
temp = df.rolling(rolling_autocorr_periods).apply(
lambda x: x.autocorr(), raw=False
)
X = pd.concat([X, temp], axis=1).fillna(method='bfill')
if add_date_part in ['simple', 'expanded', 'recurring']:
date_part_df = date_part(df.index, method=add_date_part)
date_part_df.index = df.index
        X = pd.concat(
            [
                X,
                date_part_df,
            ],
            axis=1,
        )
if holiday:
from autots.tools.holiday import holiday_flag
X['holiday_flag_'] = holiday_flag(X.index, country=holiday_country)
X['holiday_flag_future_'] = holiday_flag(
X.index.shift(1, freq=pd.infer_freq(X.index)), country=holiday_country
)
if str(polynomial_degree).isdigit():
polynomial_degree = abs(int(polynomial_degree))
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(polynomial_degree)
X = pd.DataFrame(poly.fit_transform(X))
# unlike the others, this pulls the entire window, not just one lag
if str(window).isdigit():
# we already have lag 1 using this
for curr_shift in range(1, window):
X = pd.concat([X, df.shift(curr_shift)], axis=1).fillna(method='bfill')
X = X.replace([np.inf, -np.inf], np.nan)
X = X.fillna(method='ffill').fillna(method='bfill')
X.columns = [str(x) for x in range(len(X.columns))]
return X
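def _example_rolling_x_regressor():
    """Hedged usage sketch (added for illustration; not part of the original module).

    The toy DataFrame and parameter choices below are assumptions made only to
    show a plausible call; they are not values used elsewhere in this file.
    """
    toy_index = pd.date_range("2021-01-01", periods=60, freq="D")
    toy_df = pd.DataFrame(
        np.random.rand(60, 2), index=toy_index, columns=["series_a", "series_b"]
    )
    # Build a lagged/rolling feature matrix aligned with toy_df's index;
    # callers shift it against Y to train a one-step-ahead regressor.
    features = rolling_x_regressor(
        toy_df,
        mean_rolling_periods=7,
        std_rolling_periods=7,
        ewm_alpha=0.5,
        add_date_part='simple',
    )
    return features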
def rolling_x_regressor_regressor(
df,
mean_rolling_periods: int = 30,
macd_periods: int = None,
std_rolling_periods: int = 7,
max_rolling_periods: int = None,
min_rolling_periods: int = None,
quantile90_rolling_periods: int = None,
quantile10_rolling_periods: int = None,
ewm_alpha: float = 0.5,
ewm_var_alpha: float = None,
additional_lag_periods: int = 7,
abs_energy: bool = False,
rolling_autocorr_periods: int = None,
add_date_part: str = None,
holiday: bool = False,
holiday_country: str = 'US',
polynomial_degree: int = None,
window: int = None,
future_regressor=None,
):
"""Adds in the future_regressor."""
X = rolling_x_regressor(
df,
mean_rolling_periods=mean_rolling_periods,
macd_periods=macd_periods,
std_rolling_periods=std_rolling_periods,
max_rolling_periods=max_rolling_periods,
min_rolling_periods=min_rolling_periods,
ewm_var_alpha=ewm_var_alpha,
quantile90_rolling_periods=quantile90_rolling_periods,
quantile10_rolling_periods=quantile10_rolling_periods,
additional_lag_periods=additional_lag_periods,
ewm_alpha=ewm_alpha,
abs_energy=abs_energy,
rolling_autocorr_periods=rolling_autocorr_periods,
add_date_part=add_date_part,
holiday=holiday,
holiday_country=holiday_country,
polynomial_degree=polynomial_degree,
window=window,
)
if future_regressor is not None:
X = pd.concat([X, future_regressor], axis=1)
return X
def retrieve_regressor(
regression_model: dict = {
"model": 'Adaboost',
"model_params": {
'n_estimators': 50,
'base_estimator': 'DecisionTree',
'loss': 'linear',
'learning_rate': 1.0,
},
},
verbose: int = 0,
verbose_bool: bool = False,
random_seed: int = 2020,
n_jobs: int = 1,
multioutput: bool = True,
):
"""Convert a model param dict to model object for regression frameworks."""
model_class = regression_model['model']
model_param_dict = regression_model.get("model_params", {})
if model_class == 'ElasticNet':
if multioutput:
from sklearn.linear_model import MultiTaskElasticNet
regr = MultiTaskElasticNet(
alpha=1.0, random_state=random_seed, **model_param_dict
)
else:
from sklearn.linear_model import ElasticNet
regr = ElasticNet(alpha=1.0, random_state=random_seed, **model_param_dict)
return regr
elif model_class == 'DecisionTree':
from sklearn.tree import DecisionTreeRegressor
regr = DecisionTreeRegressor(random_state=random_seed, **model_param_dict)
return regr
elif model_class == 'MLP':
from sklearn.neural_network import MLPRegressor
regr = MLPRegressor(
random_state=random_seed, verbose=verbose_bool, **model_param_dict
)
return regr
elif model_class == 'KerasRNN':
from autots.models.dnn import KerasRNN
regr = KerasRNN(verbose=verbose, random_seed=random_seed, **model_param_dict)
return regr
elif model_class == 'Transformer':
from autots.models.dnn import Transformer
regr = Transformer(verbose=verbose, random_seed=random_seed, **model_param_dict)
return regr
elif model_class == 'KNN':
from sklearn.neighbors import KNeighborsRegressor
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
regr = MultiOutputRegressor(
KNeighborsRegressor(**model_param_dict),
n_jobs=n_jobs,
)
else:
regr = KNeighborsRegressor(**model_param_dict, n_jobs=n_jobs)
return regr
elif model_class == 'HistGradientBoost':
try:
from sklearn.experimental import enable_hist_gradient_boosting # noqa
except Exception:
pass
from sklearn.ensemble import HistGradientBoostingRegressor
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
regr = MultiOutputRegressor(
HistGradientBoostingRegressor(
verbose=int(verbose_bool),
random_state=random_seed,
**model_param_dict,
)
)
else:
regr = HistGradientBoostingRegressor(
verbose=int(verbose_bool),
random_state=random_seed,
**model_param_dict,
)
return regr
elif model_class == 'LightGBM':
from lightgbm import LGBMRegressor
regr = LGBMRegressor(
verbose=int(verbose_bool),
random_state=random_seed,
n_jobs=n_jobs,
**model_param_dict,
)
if multioutput:
from sklearn.multioutput import RegressorChain
return RegressorChain(regr)
else:
return regr
elif model_class == 'Adaboost':
from sklearn.ensemble import AdaBoostRegressor
if regression_model["model_params"]['base_estimator'] == 'SVR':
from sklearn.svm import LinearSVR
svc = LinearSVR(verbose=verbose, random_state=random_seed, max_iter=1500)
regr = AdaBoostRegressor(
base_estimator=svc,
n_estimators=regression_model["model_params"]['n_estimators'],
loss=regression_model["model_params"]['loss'],
learning_rate=regression_model["model_params"]['learning_rate'],
random_state=random_seed,
)
elif regression_model["model_params"]['base_estimator'] == 'LinReg':
from sklearn.linear_model import LinearRegression
linreg = LinearRegression()
regr = AdaBoostRegressor(
base_estimator=linreg,
n_estimators=regression_model["model_params"]['n_estimators'],
loss=regression_model["model_params"]['loss'],
learning_rate=regression_model["model_params"]['learning_rate'],
random_state=random_seed,
)
else:
regr = AdaBoostRegressor(random_state=random_seed, **model_param_dict)
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
return MultiOutputRegressor(regr, n_jobs=n_jobs)
else:
return regr
elif model_class == 'xgboost':
import xgboost as xgb
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
regr = MultiOutputRegressor(
xgb.XGBRegressor(verbosity=verbose, **model_param_dict),
n_jobs=n_jobs,
)
else:
regr = xgb.XGBRegressor(
verbosity=verbose, **model_param_dict, n_jobs=n_jobs
)
return regr
elif model_class == 'SVM':
from sklearn.svm import LinearSVR
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
regr = MultiOutputRegressor(
LinearSVR(verbose=verbose_bool, **model_param_dict),
n_jobs=n_jobs,
)
else:
regr = LinearSVR(verbose=verbose_bool, **model_param_dict)
return regr
elif model_class == 'BayesianRidge':
from sklearn.linear_model import BayesianRidge
regr = BayesianRidge(**model_param_dict)
if multioutput:
from sklearn.multioutput import RegressorChain
return RegressorChain(regr)
else:
return regr
elif model_class == "ExtraTrees":
from sklearn.ensemble import ExtraTreesRegressor
return ExtraTreesRegressor(
n_jobs=n_jobs, random_state=random_seed, **model_param_dict
)
elif model_class == "RadiusNeighbors":
from sklearn.neighbors import RadiusNeighborsRegressor
regr = RadiusNeighborsRegressor(n_jobs=n_jobs, **model_param_dict)
return regr
elif model_class == "PoissonRegresssion":
from sklearn.linear_model import PoissonRegressor
if multioutput:
from sklearn.multioutput import MultiOutputRegressor
regr = MultiOutputRegressor(
PoissonRegressor(fit_intercept=True, max_iter=200, **model_param_dict),
n_jobs=n_jobs,
)
else:
regr = PoissonRegressor(**model_param_dict)
return regr
elif model_class == 'RANSAC':
from sklearn.linear_model import RANSACRegressor
return RANSACRegressor(random_state=random_seed, **model_param_dict)
else:
regression_model['model'] = 'RandomForest'
from sklearn.ensemble import RandomForestRegressor
regr = RandomForestRegressor(
random_state=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**model_param_dict,
)
return regr
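def _example_retrieve_regressor():
    """Hedged usage sketch (added for illustration; not part of the original module).

    The KNN settings below are illustrative assumptions, not defaults taken
    from elsewhere in this file.
    """
    model_spec = {
        "model": 'KNN',
        "model_params": {"n_neighbors": 5, "weights": 'distance'},
    }
    # multioutput=False returns the bare sklearn estimator; with multioutput=True
    # the KNN branch wraps it in MultiOutputRegressor so each output series gets its own fit.
    regr = retrieve_regressor(
        regression_model=model_spec,
        verbose=0,
        verbose_bool=False,
        random_seed=2020,
        n_jobs=1,
        multioutput=False,
    )
    return regr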
# models that can more quickly handle many X/Y obs, with modest number of features
sklearn_model_dict = {
'RandomForest': 0.02,
'ElasticNet': 0.05,
'MLP': 0.05,
'DecisionTree': 0.05,
'KNN': 0.05,
'Adaboost': 0.03,
'SVM': 0.05, # was slow, LinearSVR seems much faster
'BayesianRidge': 0.05,
'xgboost': 0.01,
'KerasRNN': 0.02,
'Transformer': 0.02,
'HistGradientBoost': 0.03,
'LightGBM': 0.03,
'ExtraTrees': 0.05,
'RadiusNeighbors': 0.02,
'PoissonRegresssion': 0.03,
'RANSAC': 0.05,
}
multivariate_model_dict = {
'RandomForest': 0.02,
# 'ElasticNet': 0.05,
'MLP': 0.03,
'DecisionTree': 0.05,
'KNN': 0.05,
'Adaboost': 0.03,
'SVM': 0.05,
# 'BayesianRidge': 0.05,
'xgboost': 0.01,
'KerasRNN': 0.01,
'HistGradientBoost': 0.03,
'LightGBM': 0.03,
'ExtraTrees': 0.05,
'RadiusNeighbors': 0.02,
'PoissonRegresssion': 0.03,
'RANSAC': 0.05,
}
# these should train quickly with low-dimensional X/Y, and not mind being run multiple times in parallel
univariate_model_dict = {
'ElasticNet': 0.05,
'MLP': 0.05,
'DecisionTree': 0.05,
'KNN': 0.03,
'Adaboost': 0.05,
'SVM': 0.05,
'BayesianRidge': 0.03,
'HistGradientBoost': 0.02,
'LightGBM': 0.01,
'ExtraTrees': 0.05,
'RadiusNeighbors': 0.05,
'RANSAC': 0.02,
}
# for high dimensionality, many-feature X, many-feature Y, but with moderate obs count
rolling_regression_dict = {
'RandomForest': 0.02,
'ElasticNet': 0.05,
'MLP': 0.05,
'DecisionTree': 0.05,
'KNN': 0.05,
'Adaboost': 0.03,
'SVM': 0.05,
'KerasRNN': 0.02,
'LightGBM': 0.03,
'ExtraTrees': 0.05,
'RadiusNeighbors': 0.01,
'PoissonRegresssion': 0.03,
'RANSAC': 0.05,
}
# models where we can be sure the model isn't sharing information across multiple Y's...
no_shared_model_dict = {
'KNN': 0.1,
'Adaboost': 0.1,
'SVM': 0.1,
'xgboost': 0.1,
'HistGradientBoost': 0.1,
'PoissonRegresssion': 0.05,
}
# these are models that are relatively fast with large multioutput Y, small n obs
datepart_model_dict: dict = {
'RandomForest': 0.05,
'ElasticNet': 0.05,
'MLP': 0.05,
'DecisionTree': 0.05,
'Adaboost': 0.05,
'SVM': 0.05,
'KerasRNN': 0.05,
'Transformer': 0.05,
'ExtraTrees': 0.07,
'RadiusNeighbors': 0.05,
}
def generate_regressor_params(
model_dict=None,
):
    """Generate new parameters for input to regressor."""
    if model_dict is None:
        model_dict = sklearn_model_dict
model = random.choices(list(model_dict.keys()), list(model_dict.values()), k=1)[0]
if model in [
'xgboost',
'Adaboost',
'DecisionTree',
'LightGBM',
'MLP',
'KNN',
'KerasRNN',
'Transformer',
'HistGradientBoost',
'RandomForest',
'ExtraTrees',
]:
if model == 'Adaboost':
param_dict = {
"model": 'Adaboost',
"model_params": {
"n_estimators": random.choices([50, 100, 500], [0.7, 0.2, 0.1])[0],
"loss": random.choices(
['linear', 'square', 'exponential'], [0.8, 0.01, 0.1]
)[0],
"base_estimator": random.choices(
[None, 'LinReg', 'SVR'], [0.8, 0.1, 0.1]
)[0],
"learning_rate": random.choices([1, 0.5], [0.9, 0.1])[0],
},
}
elif model == 'xgboost':
param_dict = {
"model": 'xgboost',
"model_params": {
"objective": np.random.choice(
['count:poisson', 'reg:squarederror', 'reg:gamma'],
p=[0.4, 0.5, 0.1],
size=1,
).item(),
"eta": np.random.choice([0.3], p=[1.0], size=1).item(),
"min_child_weight": np.random.choice(
[1, 2, 5], p=[0.8, 0.1, 0.1], size=1
).item(),
"max_depth": np.random.choice(
[3, 6, 9], p=[0.1, 0.8, 0.1], size=1
).item(),
"subsample": np.random.choice(
[1, 0.7, 0.5], p=[0.9, 0.05, 0.05], size=1
).item(),
},
}
elif model == 'MLP':
solver = np.random.choice(
['lbfgs', 'sgd', 'adam'], p=[0.5, 0.1, 0.4], size=1
).item()
if solver in ['sgd', 'adam']:
early_stopping = np.random.choice([True, False], size=1).item()
learning_rate_init = np.random.choice(
[0.01, 0.001, 0.0001, 0.00001], p=[0.1, 0.7, 0.1, 0.1], size=1
).item()
else:
early_stopping = False
learning_rate_init = 0.001
param_dict = {
"model": 'MLP',
"model_params": {
"hidden_layer_sizes": random.choices(
[
(100,),
(25, 15, 25),
(72, 36, 72),
(25, 50, 25),
(32, 64, 32),
(32, 32, 32),
],
[0.1, 0.3, 0.3, 0.1, 0.1, 0.1],
)[0],
"max_iter": np.random.choice(
[250, 500, 1000], p=[0.8, 0.1, 0.1], size=1
).item(),
"activation": np.random.choice(
['identity', 'logistic', 'tanh', 'relu'],
p=[0.05, 0.05, 0.6, 0.3],
size=1,
).item(),
"solver": solver,
"early_stopping": early_stopping,
"learning_rate_init": learning_rate_init,
},
}
elif model == 'KNN':
param_dict = {
"model": 'KNN',
"model_params": {
"n_neighbors": np.random.choice(
[3, 5, 10], p=[0.2, 0.7, 0.1], size=1
).item(),
"weights": np.random.choice(
['uniform', 'distance'], p=[0.7, 0.3], size=1
).item(),
},
}
elif model == 'RandomForest':
param_dict = {
"model": 'RandomForest',
"model_params": {
"n_estimators": random.choices(
[300, 100, 1000, 5000], [0.4, 0.4, 0.2, 0.01]
)[0],
"min_samples_leaf": random.choices([2, 4, 1], [0.2, 0.2, 0.8])[0],
"bootstrap": random.choices([True, False], [0.9, 0.1])[0],
# absolute_error is noticeably slower
# "criterion": random.choices(
# ["squared_error", "poisson"], [0.99, 0.001]
# )[0],
},
}
elif model == 'ExtraTrees':
max_depth_choice = random.choices([None, 5, 10, 20], [0.2, 0.1, 0.5, 0.3])[
0
]
estimators_choice = random.choices([50, 100, 500], [0.05, 0.9, 0.05])[0]
param_dict = {
"model": 'ExtraTrees',
"model_params": {
"n_estimators": estimators_choice,
"min_samples_leaf": random.choices([2, 4, 1], [0.1, 0.1, 0.8])[0],
"max_depth": max_depth_choice,
# "criterion": "squared_error",
},
}
elif model == 'KerasRNN':
init_list = [
'glorot_uniform',
'lecun_uniform',
'glorot_normal',
'RandomUniform',
'he_normal',
'zeros',
]
param_dict = {
"model": 'KerasRNN',
"model_params": {
"kernel_initializer": random.choices(init_list)[0],
"epochs": random.choices(
[50, 100, 200, 500, 750], [0.75, 0.2, 0.05, 0.01, 0.001]
)[0],
"batch_size": random.choices([8, 16, 32, 72], [0.2, 0.2, 0.5, 0.1])[
0
],
"optimizer": random.choices(
['adam', 'rmsprop', 'adagrad'], [0.4, 0.5, 0.1]
)[0],
"loss": random.choices(
['mae', 'Huber', 'poisson', 'mse', 'mape'],
[0.2, 0.3, 0.1, 0.2, 0.2],
)[0],
"hidden_layer_sizes": random.choices(
[
(100,),
(32,),
(72, 36, 72),
(25, 50, 25),
(32, 64, 32),
(32, 32, 32),
],
[0.1, 0.3, 0.3, 0.1, 0.1, 0.1],
)[0],
"rnn_type": random.choices(
['LSTM', 'GRU', "E2D2", "CNN"], [0.5, 0.3, 0.15, 0.01]
)[0],
"shape": random.choice([1, 2]),
},
}
elif model == 'Transformer':
param_dict = {
"model": 'Transformer',
"model_params": {
"epochs": random.choices(
[50, 100, 200, 500, 750], [0.75, 0.2, 0.05, 0.01, 0.001]
)[0],
"batch_size": random.choices(
[8, 16, 32, 64, 72], [0.01, 0.2, 0.5, 0.1, 0.1]
)[0],
"optimizer": random.choices(
['adam', 'rmsprop', 'adagrad'], [0.4, 0.5, 0.1]
)[0],
"loss": random.choices(
['mae', 'Huber', 'poisson', 'mse', 'mape'],
[0.2, 0.3, 0.1, 0.2, 0.2],
)[0],
"head_size": random.choices(
[32, 64, 128, 256, 384], [0.1, 0.1, 0.3, 0.5, 0.05]
)[0],
"num_heads": random.choices([2, 4], [0.2, 0.2])[0],
"ff_dim": random.choices(
[2, 3, 4, 32, 64], [0.1, 0.1, 0.8, 0.05, 0.05]
)[0],
"num_transformer_blocks": random.choices(
[1, 2, 4, 6],
[0.2, 0.2, 0.6, 0.05],
)[0],
"mlp_units": random.choices(
[32, 64, 128, 256],
[0.2, 0.3, 0.8, 0.2],
),
"mlp_dropout": random.choices(
[0.05, 0.2, 0.4],
[0.2, 0.8, 0.2],
)[0],
"dropout": random.choices(
[0.05, 0.2, 0.4],
[0.2, 0.8, 0.2],
)[0],
},
}
elif model == 'HistGradientBoost':
param_dict = {
"model": 'HistGradientBoost',
"model_params": {
"loss": random.choices(
['squared_error', 'poisson', 'absolute_error'], [0.8, 0.1, 0.1]
)[0],
"learning_rate": random.choices([1, 0.1, 0.01], [0.3, 0.4, 0.3])[0],
"max_depth": random.choices(
[None, 5, 10, 20], [0.7, 0.1, 0.1, 0.1]
)[0],
"min_samples_leaf": random.choices(
[20, 5, 10, 30], [0.9, 0.1, 0.1, 0.1]
)[0],
"max_iter": random.choices(
[100, 250, 50, 500], [0.9, 0.1, 0.1, 0.001]
)[0],
"l2_regularization": random.choices(
[0, 0.01, 0.02, 0.4], [0.9, 0.1, 0.1, 0.1]
)[0],
},
}
elif model == 'LightGBM':
param_dict = {
"model": 'LightGBM',
"model_params": {
"objective": random.choices(
[
'regression',
'gamma',
'huber',
'regression_l1',
'tweedie',
'poisson',
'quantile',
],
[0.4, 0.2, 0.2, 0.2, 0.2, 0.05, 0.01],
)[0],
"learning_rate": random.choices(
[0.001, 0.1, 0.01],
[0.1, 0.6, 0.3],
)[0],
"num_leaves": random.choices(
[31, 127, 70],
[0.6, 0.1, 0.3],
)[0],
"max_depth": random.choices(
[-1, 5, 10],
[0.6, 0.1, 0.3],
)[0],
"boosting_type": random.choices(
['gbdt', 'rf', 'dart', 'goss'],
[0.6, 0, 0.2, 0.2],
)[0],
"n_estimators": random.choices(
[100, 250, 50, 500],
[0.6, 0.1, 0.3, 0.0010],
)[0],
},
}
else:
min_samples = np.random.choice(
[1, 2, 0.05], p=[0.5, 0.3, 0.2], size=1
).item()
min_samples = int(min_samples) if min_samples in [2] else min_samples
param_dict = {
"model": 'DecisionTree',
"model_params": {
"max_depth": np.random.choice(
[None, 3, 9], p=[0.5, 0.3, 0.2], size=1
).item(),
"min_samples_split": min_samples,
},
}
else:
param_dict = {"model": model, "model_params": {}}
return param_dict
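def _example_generate_regressor_params():
    """Hedged usage sketch (added for illustration; not part of the original module).

    Restricting the sampling pool to univariate_model_dict is an assumption
    made only to keep the example small.
    """
    # Randomly sample a {"model": ..., "model_params": ...} spec, then turn it
    # into an unfitted regressor object ready for .fit(X, Y).
    sampled_spec = generate_regressor_params(model_dict=univariate_model_dict)
    regr = retrieve_regressor(
        regression_model=sampled_spec,
        verbose=0,
        verbose_bool=False,
        random_seed=2020,
        n_jobs=1,
        multioutput=False,
    )
    return sampled_spec, regr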
class RollingRegression(ModelObject):
"""General regression-framed approach to forecasting using sklearn.
Who are you who are so wise in the ways of science?
I am Arthur, King of the Britons. -Python
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
holiday (bool): If true, include holiday flags
regression_type (str): type of regression (None, 'User')
"""
def __init__(
self,
name: str = "RollingRegression",
frequency: str = 'infer',
prediction_interval: float = 0.9,
regression_type: str = None,
holiday_country: str = 'US',
verbose: int = 0,
random_seed: int = 2020,
regression_model: dict = {
"model": 'ExtraTrees',
"model_params": {},
},
holiday: bool = False,
mean_rolling_periods: int = 30,
macd_periods: int = None,
std_rolling_periods: int = 7,
max_rolling_periods: int = 7,
min_rolling_periods: int = 7,
ewm_var_alpha: int = None,
quantile90_rolling_periods: int = None,
quantile10_rolling_periods: int = None,
ewm_alpha: float = 0.5,
additional_lag_periods: int = 7,
abs_energy: bool = False,
rolling_autocorr_periods: int = None,
add_date_part: str = None,
polynomial_degree: int = None,
x_transform: str = None,
window: int = None,
n_jobs: int = -1,
**kwargs,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
regression_type=regression_type,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
self.regression_model = regression_model
self.holiday = holiday
self.mean_rolling_periods = mean_rolling_periods
if mean_rolling_periods is None:
self.macd_periods = None
else:
self.macd_periods = macd_periods
self.std_rolling_periods = std_rolling_periods
self.max_rolling_periods = max_rolling_periods
self.min_rolling_periods = min_rolling_periods
self.ewm_var_alpha = ewm_var_alpha
self.quantile90_rolling_periods = quantile90_rolling_periods
self.quantile10_rolling_periods = quantile10_rolling_periods
self.ewm_alpha = ewm_alpha
self.additional_lag_periods = additional_lag_periods
self.abs_energy = abs_energy
self.rolling_autocorr_periods = rolling_autocorr_periods
self.add_date_part = add_date_part
self.polynomial_degree = polynomial_degree
self.x_transform = x_transform
self.window = window
def _x_transformer(self):
if self.x_transform == 'FastICA':
from sklearn.decomposition import FastICA
x_transformer = FastICA(n_components=None, random_state=2020, whiten=True)
elif self.x_transform == 'Nystroem':
from sklearn.kernel_approximation import Nystroem
half_size = int(self.sktraindata.shape[0] / 2) + 1
max_comp = 200
n_comp = max_comp if half_size > max_comp else half_size
x_transformer = Nystroem(
kernel='rbf', gamma=0.2, random_state=2020, n_components=n_comp
)
else:
# self.x_transform = 'RmZeroVariance'
from sklearn.feature_selection import VarianceThreshold
x_transformer = VarianceThreshold(threshold=0.0)
return x_transformer
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
future_regressor (pandas.DataFrame or Series): Datetime Indexed
"""
df = self.basic_profile(df)
self.df_train = df
# if external regressor, do some check up
if self.regression_type is not None:
if future_regressor is None:
raise ValueError(
"future_regressor not supplied, necessary for regression_type"
)
self.regressor_train = future_regressor
# define X and Y
self.sktraindata = self.df_train.dropna(how='all', axis=0)
self.sktraindata = self.sktraindata.fillna(method='ffill').fillna(
method='bfill'
)
Y = self.sktraindata.drop(self.sktraindata.head(2).index)
Y.columns = [x for x in range(len(Y.columns))]
X = rolling_x_regressor(
self.sktraindata,
mean_rolling_periods=self.mean_rolling_periods,
macd_periods=self.macd_periods,
std_rolling_periods=self.std_rolling_periods,
max_rolling_periods=self.max_rolling_periods,
min_rolling_periods=self.min_rolling_periods,
ewm_var_alpha=self.ewm_var_alpha,
quantile90_rolling_periods=self.quantile90_rolling_periods,
quantile10_rolling_periods=self.quantile10_rolling_periods,
additional_lag_periods=self.additional_lag_periods,
ewm_alpha=self.ewm_alpha,
abs_energy=self.abs_energy,
rolling_autocorr_periods=self.rolling_autocorr_periods,
add_date_part=self.add_date_part,
holiday=self.holiday,
holiday_country=self.holiday_country,
polynomial_degree=self.polynomial_degree,
window=self.window,
)
if self.regression_type == 'User':
X = pd.concat([X, self.regressor_train], axis=1)
if self.x_transform in ['FastICA', 'Nystroem', 'RmZeroVariance']:
self.x_transformer = self._x_transformer()
self.x_transformer = self.x_transformer.fit(X)
X = pd.DataFrame(self.x_transformer.transform(X))
X = X.replace([np.inf, -np.inf], 0).fillna(0)
"""
Tail(1) is dropped to shift data to become forecast 1 ahead
and the first one is dropped because it will least accurately represent
rolling values
"""
X = X.drop(X.tail(1).index).drop(X.head(1).index)
if isinstance(X, pd.DataFrame):
X.columns = [str(xc) for xc in X.columns]
multioutput = True
if Y.ndim < 2:
multioutput = False
elif Y.shape[1] < 2:
multioutput = False
# retrieve model object to train
self.regr = retrieve_regressor(
regression_model=self.regression_model,
verbose=self.verbose,
verbose_bool=self.verbose_bool,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
multioutput=multioutput,
)
self.regr = self.regr.fit(X, Y)
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self,
forecast_length: int,
future_regressor=None,
just_point_forecast: bool = False,
):
"""Generate forecast data immediately following dates of index supplied to .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
index = self.create_forecast_index(forecast_length=forecast_length)
if self.regression_type in ['User', 'user']:
complete_regressor = pd.concat(
[self.regressor_train, future_regressor], axis=0
)
combined_index = self.df_train.index.append(index)
forecast = pd.DataFrame()
self.sktraindata.columns = [x for x in range(len(self.sktraindata.columns))]
# forecast, 1 step ahead, then another, and so on
for x in range(forecast_length):
x_dat = rolling_x_regressor(
self.sktraindata,
mean_rolling_periods=self.mean_rolling_periods,
macd_periods=self.macd_periods,
std_rolling_periods=self.std_rolling_periods,
max_rolling_periods=self.max_rolling_periods,
min_rolling_periods=self.min_rolling_periods,
ewm_var_alpha=self.ewm_var_alpha,
quantile90_rolling_periods=self.quantile90_rolling_periods,
quantile10_rolling_periods=self.quantile10_rolling_periods,
additional_lag_periods=self.additional_lag_periods,
ewm_alpha=self.ewm_alpha,
abs_energy=self.abs_energy,
rolling_autocorr_periods=self.rolling_autocorr_periods,
add_date_part=self.add_date_part,
holiday=self.holiday,
holiday_country=self.holiday_country,
polynomial_degree=self.polynomial_degree,
)
if self.regression_type == 'User':
x_dat = pd.concat(
[x_dat, complete_regressor.head(x_dat.shape[0])], axis=1
).fillna(0)
if self.x_transform in ['FastICA', 'Nystroem', 'RmZeroVariance']:
x_dat = pd.DataFrame(self.x_transformer.transform(x_dat))
x_dat = x_dat.replace([np.inf, -np.inf], 0).fillna(0)
if isinstance(x_dat, pd.DataFrame):
x_dat.columns = [str(xc) for xc in x_dat.columns]
rfPred = pd.DataFrame(self.regr.predict(x_dat.tail(1).to_numpy()))
forecast = pd.concat([forecast, rfPred], axis=0, ignore_index=True)
self.sktraindata = pd.concat(
[self.sktraindata, rfPred], axis=0, ignore_index=True
)
self.sktraindata.index = combined_index[: len(self.sktraindata.index)]
forecast.columns = self.column_names
forecast.index = index
if just_point_forecast:
return forecast
else:
upper_forecast, lower_forecast = Point_to_Probability(
self.df_train,
forecast,
method='inferred_normal',
prediction_interval=self.prediction_interval,
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=forecast.index,
forecast_columns=forecast.columns,
lower_forecast=lower_forecast,
forecast=forecast,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
rolling_model_dict = sklearn_model_dict.copy()
del rolling_model_dict['KNN']
model_choice = generate_regressor_params(model_dict=rolling_model_dict)
mean_rolling_periods_choice = random.choices(
[None, 5, 7, 12, 30], [0.2, 0.2, 0.2, 0.2, 0.2]
)[0]
if mean_rolling_periods_choice is not None:
macd_periods_choice = seasonal_int()
if macd_periods_choice == mean_rolling_periods_choice:
macd_periods_choice = mean_rolling_periods_choice + 10
else:
macd_periods_choice = None
std_rolling_periods_choice = random.choices(
[None, 5, 7, 10, 30], [0.6, 0.1, 0.1, 0.1, 0.1]
)[0]
max_rolling_periods_choice = random.choices([None, seasonal_int()], [0.5, 0.5])[
0
]
min_rolling_periods_choice = random.choices([None, seasonal_int()], [0.5, 0.5])[
0
]
lag_periods_choice = seasonal_int() - 1
lag_periods_choice = 2 if lag_periods_choice < 2 else lag_periods_choice
ewm_choice = random.choices(
[None, 0.05, 0.1, 0.2, 0.5, 0.8], [0.4, 0.01, 0.05, 0.1, 0.1, 0.05]
)[0]
abs_energy_choice = random.choices([True, False], [0.3, 0.7])[0]
rolling_autocorr_periods_choice = random.choices(
[None, 2, 7, 12, 30], [0.8, 0.05, 0.05, 0.05, 0.05]
)[0]
add_date_part_choice = random.choices(
[None, 'simple', 'expanded', 'recurring'], [0.7, 0.1, 0.1, 0.1]
)[0]
holiday_choice = random.choices([True, False], [0.2, 0.8])[0]
polynomial_degree_choice = random.choices([None, 2], [0.99, 0.01])[0]
x_transform_choice = random.choices(
[None, 'FastICA', 'Nystroem', 'RmZeroVariance'],
[0.85, 0.05, 0.05, 0.05],
)[0]
if "regressor" in method:
regression_choice = "User"
else:
regression_choice = random.choices([None, 'User'], [0.7, 0.3])[0]
parameter_dict = {
'regression_model': model_choice,
'holiday': holiday_choice,
'mean_rolling_periods': mean_rolling_periods_choice,
'macd_periods': macd_periods_choice,
'std_rolling_periods': std_rolling_periods_choice,
'max_rolling_periods': max_rolling_periods_choice,
'min_rolling_periods': min_rolling_periods_choice,
'ewm_alpha': ewm_choice,
'additional_lag_periods': lag_periods_choice,
'abs_energy': abs_energy_choice,
'rolling_autocorr_periods': rolling_autocorr_periods_choice,
'add_date_part': add_date_part_choice,
'polynomial_degree': polynomial_degree_choice,
'x_transform': x_transform_choice,
'regression_type': regression_choice,
}
return parameter_dict
def get_params(self):
"""Return dict of current parameters."""
parameter_dict = {
'regression_model': self.regression_model,
'holiday': self.holiday,
'mean_rolling_periods': self.mean_rolling_periods,
'macd_periods': self.macd_periods,
'std_rolling_periods': self.std_rolling_periods,
'max_rolling_periods': self.max_rolling_periods,
'min_rolling_periods': self.min_rolling_periods,
"ewm_var_alpha": self.ewm_var_alpha,
"quantile90_rolling_periods": self.quantile90_rolling_periods,
"quantile10_rolling_periods": self.quantile10_rolling_periods,
'ewm_alpha': self.ewm_alpha,
'additional_lag_periods': self.additional_lag_periods,
'abs_energy': self.abs_energy,
'rolling_autocorr_periods': self.rolling_autocorr_periods,
'add_date_part': self.add_date_part,
'polynomial_degree': self.polynomial_degree,
'x_transform': self.x_transform,
'regression_type': self.regression_type,
}
return parameter_dict
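# Usage sketch (illustrative only, assuming the class above is the AutoTS
# RollingRegression model): `df_wide` stands in for a wide, datetime-indexed
# pandas DataFrame of the series to forecast; parameter values are arbitrary.
#
#   model = RollingRegression(mean_rolling_periods=30, std_rolling_periods=7)
#   model = model.fit(df_wide)
#   point_forecast = model.predict(forecast_length=14, just_point_forecast=True)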
class WindowRegression(ModelObject):
"""Regression use the last n values as the basis of training data.
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
# regression_type: str = None,
"""
def __init__(
self,
name: str = "WindowRegression",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2022,
verbose: int = 0,
window_size: int = 10,
regression_model: dict = {
"model": 'RandomForest',
"model_params": {},
},
input_dim: str = 'univariate',
output_dim: str = 'forecast_length',
normalize_window: bool = False,
shuffle: bool = False,
forecast_length: int = 1,
max_windows: int = 5000,
regression_type: str = None,
n_jobs: int = -1,
**kwargs,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
regression_type=regression_type,
verbose=verbose,
n_jobs=n_jobs,
)
self.window_size = abs(int(window_size))
self.regression_model = regression_model
self.input_dim = input_dim
self.output_dim = output_dim
self.normalize_window = normalize_window
self.shuffle = shuffle
self.forecast_length = forecast_length
self.max_windows = abs(int(max_windows))
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
if (
df.shape[1] * self.forecast_length
) > 200 and self.input_dim == "multivariate":
raise ValueError(
"Scale exceeds recommendation for input_dim == `multivariate`"
)
df = self.basic_profile(df)
if self.regression_type in ["User", "user"]:
if future_regressor is None:
raise ValueError(
"regression_type='User' but no future_regressor passed"
)
self.df_train = df
X, Y = window_maker(
df,
window_size=self.window_size,
input_dim=self.input_dim,
normalize_window=self.normalize_window,
shuffle=self.shuffle,
output_dim=self.output_dim,
forecast_length=self.forecast_length,
max_windows=self.max_windows,
regression_type=self.regression_type,
future_regressor=future_regressor,
random_seed=self.random_seed,
)
multioutput = True
if Y.ndim < 2:
multioutput = False
elif Y.shape[1] < 2:
multioutput = False
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
self.regr = retrieve_regressor(
regression_model=self.regression_model,
verbose=self.verbose,
verbose_bool=self.verbose_bool,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
multioutput=multioutput,
)
self.regr = self.regr.fit(X, Y)
self.last_window = df.tail(self.window_size)
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self,
forecast_length: int,
future_regressor=None,
just_point_forecast: bool = False,
):
"""Generate forecast data immediately following dates of .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
if int(forecast_length) > int(self.forecast_length):
print("Regression must be refit to change forecast length!")
predictStartTime = datetime.datetime.now()
index = self.create_forecast_index(forecast_length=forecast_length)
if self.output_dim == '1step':
# combined_index = (self.df_train.index.append(index))
forecast = pd.DataFrame()
# forecast, 1 step ahead, then another, and so on
for x in range(forecast_length):
pred = last_window(
self.last_window,
window_size=self.window_size,
input_dim=self.input_dim,
normalize_window=self.normalize_window,
)
if self.regression_type in ["User", "user"]:
blasted_thing = future_regressor.iloc[x].to_frame().transpose()
tmerg = pd.concat([blasted_thing] * pred.shape[0], axis=0)
tmerg.index = pred.index
pred = pd.concat([pred, tmerg], axis=1, ignore_index=True)
if isinstance(pred, pd.DataFrame):
pred = pred.to_numpy()
rfPred = pd.DataFrame(self.regr.predict(pred))
if self.input_dim == 'univariate':
rfPred = rfPred.transpose()
rfPred.columns = self.last_window.columns
forecast = pd.concat([forecast, rfPred], axis=0, ignore_index=True)
self.last_window = pd.concat(
[self.last_window, rfPred], axis=0, ignore_index=True
)
df = forecast
else:
pred = last_window(
self.last_window,
window_size=self.window_size,
input_dim=self.input_dim,
normalize_window=self.normalize_window,
)
if self.regression_type in ["User", "user"]:
tmerg = future_regressor.tail(1).loc[
future_regressor.tail(1).index.repeat(pred.shape[0])
]
tmerg.index = pred.index
pred = pd.concat([pred, tmerg], axis=1)
if isinstance(pred, pd.DataFrame):
pred = pred.to_numpy()
cY = pd.DataFrame(self.regr.predict(pred))
if self.input_dim == 'multivariate':
cY.index = ['values']
cY.columns = np.tile(self.column_names, reps=self.forecast_length)
cY = cY.transpose().reset_index()
cY['timestep'] = np.repeat(
range(forecast_length), repeats=len(self.column_names)
)
cY = pd.pivot_table(cY, index='timestep', columns='index')
else:
cY = cY.transpose()
df = cY
df.columns = self.column_names
df.index = index
if just_point_forecast:
return df
else:
upper_forecast, lower_forecast = Point_to_Probability(
self.df_train,
df,
prediction_interval=self.prediction_interval,
method='historic_quantile',
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=df.index,
forecast_columns=df.columns,
lower_forecast=lower_forecast,
forecast=df,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
window_size_choice = random.choice([5, 10, 20, seasonal_int()])
model_choice = generate_regressor_params()
if "regressor" in method:
regression_type_choice = "User"
input_dim_choice = 'univariate'
output_dim_choice = random.choice(
['forecast_length', '1step'],
)
else:
input_dim_choice = random.choices(
['multivariate', 'univariate'], [0.01, 0.99]
)[0]
if input_dim_choice == "multivariate":
output_dim_choice = "1step"
regression_type_choice = None
else:
output_dim_choice = random.choice(
['forecast_length', '1step'],
)
regression_type_choice = random.choices(
[None, "User"], weights=[0.8, 0.2]
)[0]
normalize_window_choice = random.choices([True, False], [0.05, 0.95])[0]
max_windows_choice = random.choices([5000, 1000, 50000], [0.85, 0.05, 0.1])[0]
return {
'window_size': window_size_choice,
'input_dim': input_dim_choice,
'output_dim': output_dim_choice,
'normalize_window': normalize_window_choice,
'max_windows': max_windows_choice,
'regression_type': regression_type_choice,
'regression_model': model_choice,
}
def get_params(self):
"""Return dict of current parameters."""
return {
'window_size': self.window_size,
'input_dim': self.input_dim,
'output_dim': self.output_dim,
'normalize_window': self.normalize_window,
'max_windows': self.max_windows,
'regression_type': self.regression_type,
'regression_model': self.regression_model,
}
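# Usage sketch (illustrative only): WindowRegression builds (window, next values)
# training pairs, so forecast_length should be fixed at fit time; `df_wide` is a
# hypothetical wide, datetime-indexed DataFrame.
#
#   model = WindowRegression(window_size=10, forecast_length=14)
#   model = model.fit(df_wide)
#   point_forecast = model.predict(forecast_length=14, just_point_forecast=True)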
class ComponentAnalysis(ModelObject):
"""Forecasting on principle components.
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
model (str): An AutoTS model str
model_parameters (dict): parameters to pass to AutoTS model
n_components (int): int or 'NthN' number of components to use
decomposition (str): decomposition method to use from scikit-learn
"""
def __init__(
self,
name: str = "ComponentAnalysis",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
n_components: int = 10,
forecast_length: int = 14,
model: str = 'GLS',
model_parameters: dict = {},
decomposition: str = 'PCA',
n_jobs: int = -1,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
self.model = model
self.model_parameters = model_parameters
self.decomposition = decomposition
self.n_components = n_components
self.forecast_length = forecast_length
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
self.df_train = df
if 'thN' in str(self.n_components):
n_int = int(''.join([x for x in str(self.n_components) if x.isdigit()]))
n_int = int(np.floor(df.shape[1] / n_int))
n_int = n_int if n_int >= 2 else 2
else:
n_int = int(''.join([x for x in str(self.n_components) if x.isdigit()]))
self.n_int = n_int
if self.decomposition == 'TruncatedSVD':
from sklearn.decomposition import TruncatedSVD
transformer = TruncatedSVD(
n_components=self.n_int, random_state=self.random_seed
)
elif self.decomposition == 'WhitenedPCA':
from sklearn.decomposition import PCA
transformer = PCA(
n_components=self.n_int, whiten=True, random_state=self.random_seed
)
elif self.decomposition == 'PCA':
from sklearn.decomposition import PCA
transformer = PCA(
n_components=self.n_int, whiten=False, random_state=self.random_seed
)
elif self.decomposition == 'KernelPCA':
from sklearn.decomposition import KernelPCA
transformer = KernelPCA(
n_components=self.n_int,
kernel='rbf',
random_state=self.random_seed,
fit_inverse_transform=True,
)
elif self.decomposition == 'FastICA':
from sklearn.decomposition import FastICA
transformer = FastICA(
n_components=self.n_int,
whiten=True,
random_state=self.random_seed,
max_iter=500,
)
try:
self.transformer = transformer.fit(df)
except ValueError:
raise ValueError(
"n_components and decomposition not suitable for this dataset."
)
X = self.transformer.transform(df)
X = pd.DataFrame(X)
X.index = df.index
from autots.evaluator.auto_model import ModelMonster
try:
self.modelobj = ModelMonster(
self.model,
parameters=self.model_parameters,
frequency=self.frequency,
prediction_interval=self.prediction_interval,
holiday_country=self.holiday_country,
random_seed=self.random_seed,
verbose=self.verbose,
n_jobs=self.n_jobs,
forecast_length=self.forecast_length,
).fit(X, future_regressor=future_regressor)
except Exception as e:
raise ValueError(f"Model {str(self.model)} with error: {repr(e)}")
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self,
forecast_length: int,
future_regressor=None,
just_point_forecast: bool = False,
):
"""Generate forecast data immediately following dates of .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
XA = self.modelobj.predict(
forecast_length=forecast_length, future_regressor=future_regressor
)
Xf = self.transformer.inverse_transform(np.array(XA.forecast))
if not isinstance(Xf, pd.DataFrame):
Xf = pd.DataFrame(Xf)
Xf.columns = self.column_names
Xf.index = self.create_forecast_index(forecast_length=forecast_length)
Xf = Xf.astype(float)
if just_point_forecast:
return Xf
else:
"""
upper_forecast = self.transformer.inverse_transform(np.array(XA.upper_forecast))
if not isinstance(upper_forecast, pd.DataFrame):
upper_forecast = pd.DataFrame(upper_forecast)
upper_forecast.columns = self.column_names
upper_forecast.index = self.create_forecast_index(forecast_length=forecast_length)
lower_forecast = self.transformer.inverse_transform(np.array(XA.lower_forecast))
if not isinstance(lower_forecast, pd.DataFrame):
lower_forecast = pd.DataFrame(lower_forecast)
lower_forecast.columns = self.column_names
lower_forecast.index = self.create_forecast_index(forecast_length=forecast_length)
"""
upper_forecast, lower_forecast = Point_to_Probability(
self.df_train,
Xf,
method='inferred_normal',
prediction_interval=self.prediction_interval,
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=Xf.index,
forecast_columns=Xf.columns,
lower_forecast=lower_forecast,
forecast=Xf,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
n_components_choice = np.random.choice(
a=[10, '10thN'], size=1, p=[0.6, 0.4]
).item()
decomposition_choice = np.random.choice(
a=['TruncatedSVD', 'WhitenedPCA', 'PCA', 'KernelPCA', 'FastICA'],
size=1,
p=[0.05, 0.05, 0.5, 0.2, 0.2],
).item()
model_list = [
'LastValueNaive',
'GLS',
'TensorflowSTS',
'GLM',
'ETS',
'FBProphet',
'MotifSimulation',
'RollingRegression',
'WindowRegression',
'UnobservedComponents',
'VECM',
]
model_str = np.random.choice(
model_list,
size=1,
p=[0.01, 0.01, 0.01, 0.01, 0.01, 0.7, 0.01, 0.02, 0.1, 0.1, 0.02],
).item()
        # NOTE: this unweighted draw supersedes the weighted selection above
        model_str = np.random.choice(model_list)
from autots.evaluator.auto_model import ModelMonster
param_dict = ModelMonster(model_str).get_new_params()
return {
'model': model_str,
'model_parameters': param_dict,
'decomposition': decomposition_choice,
'n_components': n_components_choice,
}
def get_params(self):
"""Return dict of current parameters."""
return {
'model': self.model,
'model_parameters': self.model_parameters,
'decomposition': self.decomposition,
'n_components': self.n_components,
}
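# Usage sketch (illustrative only): ComponentAnalysis decomposes the wide input,
# forecasts the components with the inner model, and inverse-transforms the
# result; the inner model and values below are arbitrary choices.
#
#   model = ComponentAnalysis(model='LastValueNaive', model_parameters={},
#                             n_components=10, decomposition='PCA',
#                             forecast_length=14)
#   model = model.fit(df_wide)
#   point_forecast = model.predict(forecast_length=14, just_point_forecast=True)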
class DatepartRegression(ModelObject):
"""Regression not on series but datetime
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
"""
def __init__(
self,
name: str = "DatepartRegression",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
forecast_length: int = 1,
n_jobs: int = None,
regression_model: dict = {
"model": 'DecisionTree',
"model_params": {"max_depth": 5, "min_samples_split": 2},
},
datepart_method: str = 'expanded',
regression_type: str = None,
**kwargs,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
regression_type=regression_type,
verbose=verbose,
n_jobs=n_jobs,
)
self.regression_model = regression_model
self.datepart_method = datepart_method
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
# if external regressor, do some check up
if self.regression_type is not None:
if future_regressor is None:
raise ValueError(
"regression_type='User' but no future_regressor passed"
)
y = df.to_numpy()
X = date_part(df.index, method=self.datepart_method)
if self.regression_type in ['User', 'user']:
# regr = future_regressor.copy()
# regr.index = X.index
X = pd.concat([X, future_regressor], axis=1)
X.columns = [str(xc) for xc in X.columns]
multioutput = True
if y.ndim < 2:
multioutput = False
elif y.shape[1] < 2:
multioutput = False
y = y.ravel()
self.model = retrieve_regressor(
regression_model=self.regression_model,
verbose=self.verbose,
verbose_bool=self.verbose_bool,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
multioutput=multioutput,
)
self.df_train = df
self.model = self.model.fit(X, y)
        self.shape = df.shape
        self.fit_runtime = datetime.datetime.now() - self.startTime
        return self
def predict(
self,
forecast_length: int,
future_regressor=None,
just_point_forecast: bool = False,
):
"""Generate forecast data immediately following dates of index supplied to .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
future_regressor (pandas.DataFrame or Series): Datetime Indexed
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
index = self.create_forecast_index(forecast_length=forecast_length)
X = date_part(index, method=self.datepart_method)
if self.regression_type in ['User', 'user']:
X = pd.concat([X, future_regressor], axis=1)
X.columns = [str(xc) for xc in X.columns]
forecast = pd.DataFrame(self.model.predict(X))
forecast.columns = self.column_names
forecast.index = index
if just_point_forecast:
return forecast
else:
upper_forecast, lower_forecast = Point_to_Probability(
self.df_train,
forecast,
method='inferred_normal',
prediction_interval=self.prediction_interval,
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=forecast.index,
forecast_columns=forecast.columns,
lower_forecast=lower_forecast,
forecast=forecast,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
model_choice = generate_regressor_params(model_dict=datepart_model_dict)
datepart_choice = random.choices(
["recurring", "simple", "expanded"], [0.4, 0.3, 0.3]
)[0]
if "regressor" in method:
regression_choice = "User"
else:
regression_choice = random.choices([None, 'User'], [0.7, 0.3])[0]
parameter_dict = {
'regression_model': model_choice,
'datepart_method': datepart_choice,
'regression_type': regression_choice,
}
return parameter_dict
def get_params(self):
"""Return dict of current parameters."""
parameter_dict = {
'regression_model': self.regression_model,
'datepart_method': self.datepart_method,
'regression_type': self.regression_type,
}
return parameter_dict
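# Usage sketch (illustrative only): DatepartRegression regresses the series on
# date-derived features of the index, so forecasts come purely from the
# calendar; `df_wide` is a hypothetical wide, datetime-indexed DataFrame.
#
#   model = DatepartRegression(datepart_method='expanded')
#   model = model.fit(df_wide)
#   point_forecast = model.predict(forecast_length=14, just_point_forecast=True)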
class UnivariateRegression(ModelObject):
"""Regression-framed approach to forecasting using sklearn.
    A univariate version of rolling regression: i.e., each series is modeled independently.
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
holiday (bool): If true, include holiday flags
regression_type (str): type of regression (None, 'User')
"""
def __init__(
self,
name: str = "UnivariateRegression",
frequency: str = 'infer',
prediction_interval: float = 0.9,
regression_type: str = None,
holiday_country: str = 'US',
verbose: int = 0,
random_seed: int = 2020,
forecast_length: int = 7,
regression_model: dict = {
"model": 'ExtraTrees',
"model_params": {},
},
holiday: bool = False,
mean_rolling_periods: int = 30,
macd_periods: int = None,
std_rolling_periods: int = 7,
max_rolling_periods: int = 7,
min_rolling_periods: int = 7,
ewm_var_alpha: float = None,
ewm_alpha: float = 0.5,
additional_lag_periods: int = 7,
abs_energy: bool = False,
rolling_autocorr_periods: int = None,
add_date_part: str = None,
polynomial_degree: int = None,
x_transform: str = None,
window: int = None,
n_jobs: int = -1,
**kwargs,
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
regression_type=regression_type,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
self.forecast_length = forecast_length
self.regression_model = regression_model
self.holiday = holiday
self.mean_rolling_periods = mean_rolling_periods
if mean_rolling_periods is None:
self.macd_periods = None
else:
self.macd_periods = macd_periods
self.std_rolling_periods = std_rolling_periods
self.max_rolling_periods = max_rolling_periods
self.min_rolling_periods = min_rolling_periods
self.ewm_var_alpha = ewm_var_alpha
self.ewm_alpha = ewm_alpha
self.additional_lag_periods = additional_lag_periods
self.abs_energy = abs_energy
self.rolling_autocorr_periods = rolling_autocorr_periods
self.add_date_part = add_date_part
self.polynomial_degree = polynomial_degree
self.x_transform = x_transform
self.window = window
def _x_transformer(self):
if self.x_transform == 'FastICA':
from sklearn.decomposition import FastICA
x_transformer = FastICA(n_components=None, random_state=2020, whiten=True)
elif self.x_transform == 'Nystroem':
from sklearn.kernel_approximation import Nystroem
half_size = int(self.sktraindata.shape[0] / 2) + 1
max_comp = 200
n_comp = max_comp if half_size > max_comp else half_size
x_transformer = Nystroem(
kernel='rbf', gamma=0.2, random_state=2020, n_components=n_comp
)
else:
# self.x_transform = 'RmZeroVariance'
from sklearn.feature_selection import VarianceThreshold
x_transformer = VarianceThreshold(threshold=0.0)
return x_transformer
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
future_regressor (pandas.DataFrame or Series): Datetime Indexed
"""
df = self.basic_profile(df)
self.sktraindata = df
# if external regressor, do some check up
if self.regression_type is not None:
if future_regressor is None:
raise ValueError(
"regression_type='User' but not future_regressor supplied."
)
elif future_regressor.shape[0] != df.shape[0]:
raise ValueError(
"future_regressor shape does not match training data shape."
)
else:
self.regressor_train = future_regressor
cols = self.sktraindata.columns
def forecast_by_column(self, args, parallel, n_jobs, col):
"""Run one series and return prediction."""
            base = pd.DataFrame(self.sktraindata[col])
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import Index, MultiIndex, Series, date_range, isna
import pandas._testing as tm
@pytest.fixture(
params=[
"linear",
"index",
"values",
"nearest",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def nontemporal_method(request):
"""Fixture that returns an (method name, required kwargs) pair.
This fixture does not include method 'time' as a parameterization; that
method requires a Series with a DatetimeIndex, and is generally tested
separately from these non-temporal methods.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
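# A test consumes this fixture by unpacking the pair and forwarding the kwargs,
# for example (illustrative):
#
#   method, kwargs = nontemporal_method
#   Series([1, 2, np.nan, 4]).interpolate(method=method, **kwargs)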
@pytest.fixture(
params=[
"linear",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def interp_methods_ind(request):
"""Fixture that returns a (method name, required kwargs) pair to
be tested for various Index types.
    This fixture does not include the methods 'time', 'index', 'nearest',
    and 'values' as parameterizations.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
class TestSeriesInterpolateData:
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method="linear")
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series(
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method="time")
tm.assert_series_equal(time_interp, ord_ts)
def test_interpolate_time_raises_for_non_timeseries(self):
# When method='time' is used on a non-TimeSeries that contains a null
# value, a ValueError should be raised.
non_ts = Series([0, 1, 2, np.NaN])
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
@td.skip_if_no_scipy
def test_interpolate_cubicspline(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
).astype(float)
interp_s = ser.reindex(new_index).interpolate(method="pchip")
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima")
tm.assert_series_equal(interp_s[1:3], expected)
# interpolate at new_index where `der` is a non-zero int
expected = Series(
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")
tm.assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
tm.assert_series_equal(s.interpolate(**kwargs), s)
s = Series([], dtype=object).interpolate()
tm.assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method="index")
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
)
tm.assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method="values")
tm.assert_series_equal(other_result, result)
tm.assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
"time-weighted interpolation only works on Series or DataFrames "
"with a DatetimeIndex"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="time")
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
tm.assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list("abcd"))
result = s.interpolate()
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method="quadratic")
expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_scipy_basic(self):
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
result = s.interpolate(method="slinear")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="slinear", downcast="infer")
tm.assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method="nearest")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="nearest", downcast="infer")
tm.assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method="zero")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="zero", downcast="infer")
tm.assert_series_equal(result, expected)
# quadratic
# GH #15662.
expected = Series([1, 3.0, 6.823529, 12.0, 18.058824, 25.0])
result = s.interpolate(method="quadratic")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="quadratic", downcast="infer")
tm.assert_series_equal(result, expected)
# cubic
expected = Series([1.0, 3.0, 6.8, 12.0, 18.2, 25.0])
result = s.interpolate(method="cubic")
tm.assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("limit", [-1, 0])
def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, limit):
# GH 9217: make sure limit is greater than zero.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
with pytest.raises(ValueError, match="Limit must be greater than 0"):
s.interpolate(limit=limit, method=method, **kwargs)
def test_interpolate_invalid_float_limit(self, nontemporal_method):
# GH 9217: make sure limit is an integer.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
limit = 2.0
with pytest.raises(ValueError, match="Limit must be an integer"):
s.interpolate(limit=limit, method=method, **kwargs)
@pytest.mark.parametrize("invalid_method", [None, "nonexistent_method"])
def test_interp_invalid_method(self, invalid_method):
s = Series([1, 3, np.nan, 12, np.nan, 25])
msg = f"method must be one of.* Got '{invalid_method}' instead"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method)
# When an invalid method and invalid limit (such as -1) are
# provided, the error message reflects the invalid method.
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method, limit=-1)
def test_interp_invalid_method_and_value(self):
# GH#36624
ser = Series([1, 3, np.nan, 12, np.nan, 25])
msg = "Cannot pass both fill_value and method"
with pytest.raises(ValueError, match=msg):
ser.interpolate(fill_value=3, method="pad")
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="forward")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="linear", limit=2, limit_direction="FORWARD")
tm.assert_series_equal(result, expected)
def test_interp_unlimited(self):
        # These tests are for issue #16282: the default limit=None is unlimited.
s = Series([np.nan, 1.0, 3.0, np.nan, np.nan, np.nan, 11.0, np.nan])
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="both")
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="forward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, np.nan])
result = s.interpolate(method="linear", limit_direction="backward")
tm.assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
r"Invalid limit_direction: expecting one of \['forward', "
r"'backward', 'both'\], got 'abc'"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit=2, limit_direction="abc")
# raises an error even if no limit is specified.
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_direction="abc")
# limit_area introduced GH #16284
def test_interp_limit_area(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 3.0, 4.0, 5.0, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(method="linear", limit_area="inside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, 4.0, np.nan, np.nan, 7.0, np.nan, np.nan]
)
result = s.interpolate(method="linear", limit_area="inside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, 4.0, np.nan, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="inside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0])
result = s.interpolate(method="linear", limit_area="outside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan]
)
result = s.interpolate(method="linear", limit_area="outside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="backward"
)
tm.assert_series_equal(result, expected)
        # raises an error if an invalid limit_area is given.
msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_area="abc")
@pytest.mark.parametrize(
"method, limit_direction, expected",
[
("pad", "backward", "forward"),
("ffill", "backward", "forward"),
("backfill", "forward", "backward"),
("bfill", "forward", "backward"),
("pad", "both", "forward"),
("ffill", "both", "forward"),
("backfill", "both", "backward"),
("bfill", "both", "backward"),
],
)
def test_interp_limit_direction_raises(self, method, limit_direction, expected):
# https://github.com/pandas-dev/pandas/pull/34746
s = Series([1, 2, 3])
msg = f"`limit_direction` must be '{expected}' for method `{method}`"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=method, limit_direction=limit_direction)
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, np.nan, 7.0, 9.0, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 3.0, 5.0, np.nan, 9.0, 11.0])
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12, np.nan])
expected = Series([1.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0])
result = s.interpolate(method="linear", limit=2, limit_direction="both")
tm.assert_series_equal(result, expected)
expected = Series(
[1.0, 3.0, 4.0, np.nan, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0]
)
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to the beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, np.nan])
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, 9.0])
result = s.interpolate(method="linear", limit=2, limit_direction="both")
tm.assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit is respected before the ends.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5.0, 7.0, 7.0, np.nan])
result = s.interpolate(method="linear", limit=1, limit_direction="forward")
        tm.assert_series_equal(result, expected)
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pandas.util.testing as pdt
import qiime2
from q2_taxa import collapse, filter_table, filter_seqs
class CollapseTests(unittest.TestCase):
def assert_index_equal(self, a, b):
# this method is derived from scikit-bio 0.5.1
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def assert_data_frame_almost_equal(self, left, right):
# this method is derived from scikit-bio 0.5.1
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
self.assert_index_equal(left.index, right.index)
def test_collapse(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;c', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_missing_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;__', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_bad_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
with self.assertRaisesRegex(ValueError, 'of 42 is larger'):
collapse(table, taxonomy, 42)
with self.assertRaisesRegex(ValueError, 'of 0 is too low'):
collapse(table, taxonomy, 0)
def test_collapse_missing_table_ids_in_taxonomy(self):
table = pd.DataFrame([[2.0, 2.0],
[1.0, 1.0],
[9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat3'])
with self.assertRaisesRegex(ValueError, 'missing.*feat2'):
collapse(table, taxonomy, 1)
class FilterTable(unittest.TestCase):
def test_filter_no_filters(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_table(table, taxonomy)
def test_alt_delimiter(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_table(table, taxonomy, include='<EMAIL>',
query_delimiter='@peanut@')
pdt.assert_frame_equal(obs, table, check_like=True)
# exclude with delimiter
obs = filter_table(table, taxonomy, exclude='<EMAIL>',
query_delimiter='@peanut@')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_filter_table_unknown_mode(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_table(table, taxonomy, include='bb', mode='not-a-mode')
def test_filter_table_include(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, include='cc,ee')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='dd')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='peanut!')
def test_filter_table_include_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='bb', mode='exact')
def test_filter_table_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='ab')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, exclude='xx')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='dd')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa')
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa; bb')
def test_filter_table_exclude_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='peanut!',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
exclude='aa; bb; cc,aa; bb; dd ee',
mode='exact')
def test_filter_table_include_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa', exclude='peanut!')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only - feat2 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - feat2 dropped at inclusion step
obs = filter_table(table, taxonomy, include='cc', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at inclusion step
obs = filter_table(table, taxonomy, include='ee', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features - all dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='aa',
exclude='bb',
mode='exact')
# keep no features - one dropped at inclusion, one dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='cc',
exclude='cc',
mode='exact')
# keep no features - all dropped at inclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='peanut',
exclude='bb',
mode='exact')
def test_filter_table_underscores_escaped(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep feat1 only - underscore not treated as a wild card
obs = filter_table(table, taxonomy, include='cc,d_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - underscore in query matches underscore in
# taxonomy annotation
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; c_', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
obs = filter_table(table, taxonomy, include='c_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_all_features_with_frequency_greater_than_zero_get_filtered(self):
table = pd.DataFrame([[2.0, 0.0], [1.0, 0.0], [9.0, 0.0], [1.0, 0.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# empty - feat2, which is matched by the include term, has a frequency
# of zero in all samples, so all samples end up dropped from the table
with self.assertRaisesRegex(ValueError,
expected_regex='greater than zero'):
filter_table(table, taxonomy, include='dd')
def test_extra_taxon_ignored(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee', 'aa; bb; cc'],
index=pd.Index(['feat1', 'feat2', 'feat3'],
name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
def test_missing_taxon_errors(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc'],
index=pd.Index(['feat1'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, expected_regex='All.*feat2'):
filter_table(table, taxonomy, include='bb')
class FilterSeqs(unittest.TestCase):
def test_filter_no_filters(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_seqs(seqs, taxonomy)
def test_alt_delimiter(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_seqs(seqs, taxonomy, include='cc@peanut@ee',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# exclude with delimiter
obs = filter_seqs(seqs, taxonomy, exclude='ww@peanut@ee',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
def test_filter_seqs_unknown_mode(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_seqs(seqs, taxonomy, include='bb', mode='not-a-mode')
def test_filter_seqs_include(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='bb')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='cc,ee')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, include='cc')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, include='dd')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='dd ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='aa; bb; dd ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, include='peanut!')
def test_filter_seqs_include_exact_match(self):
seqs = | pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2']) | pandas.Series |
import re
from functools import partial
import numpy as np
import pandas as pd
import pytest
from attr import asdict
from footings.data_dictionary import (
Column,
DataDictionary,
PandasDtype,
data_dictionary,
def_column,
)
from footings.exceptions import (
DataDictionaryPandasDtypeConversionError,
DataDictionaryValidateError,
DataDictionaryValidatorsConversionError,
)
from footings.model import model
from footings.validators import equal_to
def clean_str(x):
return re.sub(r"\s+", "", x, flags=re.UNICODE)
class TestColumn:
"""Test Column class"""
def test_instance(self):
assert isinstance(Column(name="COL"), Column)
def test_kw_only(self):
with pytest.raises(TypeError):
Column("COL")
def test_attributes(self):
col1 = Column(name="COL")
assert col1.dtype is None
assert col1.description is None
assert col1.validator == []
assert col1.metadata == {}
col2 = Column(name="COL", dtype="bool", description="COL description...")
assert col2.dtype is PandasDtype.Bool
assert col2.description == "COL description..."
assert col2.validator == []
assert col2.metadata == {}
def test_dtype(self):
# test converter_pandas_dtype
assert Column(name="COL").dtype is None
assert Column(name="COL", dtype="bool").dtype is PandasDtype.Bool
assert Column(name="COL", dtype="Bool").dtype is PandasDtype.Bool
assert Column(name="COL", dtype=PandasDtype.Bool).dtype is PandasDtype.Bool
with pytest.raises(DataDictionaryPandasDtypeConversionError):
Column(name="COL", dtype="x")
def test_validator(self):
# test converter_validators
def val1():
pass
def val2():
pass
assert Column(name="COL", validator=val1).validator == [val1]
assert Column(name="COL", validator=[val1]).validator == [val1]
assert Column(name="COL", validator=[val1, val2]).validator == [val1, val2]
with pytest.raises(NotImplementedError):
# lookup validator not implemented yet
Column(name="COL", validator="get_validator")
with pytest.raises(DataDictionaryValidatorsConversionError):
# validator needs to be callable
Column(name="COL", validator=1)
def test_metadata(self):
meta = {"extra": "info"}
assert Column(name="COL", metadata=meta).metadata == meta
with pytest.raises(TypeError):
Column(name="COL", metadata="x") # metadata needs to be mapping
def test_def_column():
col = def_column(dtype="bool")
assert isinstance(col, partial)
assert col.keywords == {
"dtype": "bool",
"description": None,
"validator": [],
"metadata": {},
}
@pytest.fixture(scope="session", name="DD")
def dd_success():
@data_dictionary
class TestDD:
COL1 = def_column(
dtype="int64", description="This is column 1.", validator=equal_to(1)
)
COL2 = def_column(dtype="str", description="This is column 2.")
return TestDD
@pytest.fixture(scope="session")
def df_correct():
df = pd.DataFrame(
{
"COL1": pd.Series([1, 1, 1], dtype="int64"),
"COL2": pd.Series(["a", "b", "c"], dtype="string"),
}
)
return df
@pytest.fixture(scope="session")
def df_missing_column():
df = pd.DataFrame({"COL1": pd.Series([1, 1, 1], dtype="int64")})
return df
@pytest.fixture(scope="session")
def df_extra_column():
df = pd.DataFrame(
{
"COL1": pd.Series([1, 1, 1], dtype="int64"),
"COL2": pd.Series(["a", "b", "c"], dtype="string"),
"COL3": pd.Series([1, 1, 1], dtype="int64"),
}
)
return df
@pytest.fixture(scope="session")
def df_wrong_type():
df = pd.DataFrame(
{
"COL1": pd.Series([1, 1, 1], dtype="int64"),
"COL2": pd.Series([1, 1, 1], dtype="int64"),
}
)
return df
@pytest.fixture(scope="session")
def df_fail_validator():
df = pd.DataFrame(
{
"COL1": pd.Series([1, 1, 2], dtype="int64"),
"COL2": | pd.Series(["a", "b", "c"], dtype="string") | pandas.Series |
import pandas
from skbio.stats.composition import clr
from scipy.stats import mannwhitneyu
from scipy.stats import kruskal
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
df= | pandas.read_csv("./../../../Data_21Dec20/species_data.csv",index_col=0) | pandas.read_csv |
import inspect
import functools
import os
import warnings
warnings.filterwarnings('ignore', message='numpy.dtype size changed')
warnings.filterwarnings('ignore', message='regionprops and image moments')
warnings.filterwarnings('ignore', message='non-tuple sequence for multi')
warnings.filterwarnings('ignore', message='precision loss when converting')
import numpy as np
import pandas as pd
import skimage
import ops.features
import ops.process
import ops.io
import ops.in_situ
import ops.io_hdf
from ops.process import Align
from scipy.stats import mode
from ops.constants import *
from itertools import combinations, permutations, product
import ops.cp_emulator
class Snake():
"""Container class for methods that act directly on data (names start with
underscore) and methods that act on arguments from snakemake (e.g., filenames
provided instead of image and table data). The snakemake methods (no underscore)
are automatically loaded by `Snake.load_methods`.
"""
@staticmethod
def _apply_illumination_correction(data, correction=None, rolling_ball=False, rolling_ball_kwargs={},
n_jobs=1, backend='threading'):
if n_jobs == 1:
if correction is not None:
data = (data/correction).astype(np.uint16)
if rolling_ball:
data = ops.process.subtract_background(data,**rolling_ball_kwargs).astype(np.uint16)
return data
else:
return ops.utils.applyIJ_parallel(Snake._apply_illumination_correction,
arr=data,
correction=correction,
backend=backend,
n_jobs=n_jobs
)
@staticmethod
def _align_SBS(data, method='DAPI', upsample_factor=2, window=2, cutoff=1, q_norm=70,
align_within_cycle=True, cycle_files=None, keep_extras=False, n=1, remove_for_cycle_alignment=None):
"""Rigid alignment of sequencing cycles and channels.
Parameters
----------
data : np.ndarray or list of np.ndarrays
Unaligned SBS image with dimensions (CYCLE, CHANNEL, I, J) or list of single cycle
SBS images, each with dimensions (CHANNEL, I, J)
method : {'DAPI','SBS_mean'}
upsample_factor : int, default 2
Subpixel alignment is done if `upsample_factor` is greater than one (can be slow).
window : int or float, default 2
A centered subset of data is used if `window` is greater than one.
cutoff : int or float, default 1
Cutoff for normalized data to help deal with noise in images.
q_norm : int, default 70
Quantile for normalization to help deal with noise in images.
align_within_cycle : bool
Align sbs channels within cycles.
cycle_files : list of int or None, default None
Used for parsing sets of images where individual channels are in separate files, which
is more typically handled in a preprocessing step to combine images from the same cycle
keep_extras : bool, default False
Retain channels that are not common across all cycles by propagating each 'extra' channel
to all cycles. Ignored if same number of channels exist for all cycles.
n : int, default 1
Determines the first SBS channel in `data`. This is after dealing with `keep_extras`, so
should only account for channels in common across all cycles if `keep_extras`=False.
remove_for_cycle_alignment : None or int, default None
Channel index to remove when finding cycle offsets. This is after dealing with `keep_extras`,
so should only account for channels in common across all cycles if `keep_extras`=False.
Returns
-------
aligned : np.ndarray
SBS image aligned across cycles.
"""
if cycle_files is not None:
arr = []
# snakemake passes de-nested list of numpy arrays
current = 0
for cycle in cycle_files:
if cycle == 1:
arr.append(data[current])
else:
arr.append(np.array(data[current:current+cycle]))
current += cycle
data = np.array(arr)
else:
# TODO: creating a "ragged" array here is deprecated in numpy
data = np.array(data)
# if number of channels varies across cycles
if data.ndim==1:
# start by only keeping channels in common
channels = [len(x) for x in data]
stacked = np.array([x[-min(channels):] for x in data])
# add back in extra channels if requested
if keep_extras==True:
extras = np.array(channels)-min(channels)
arr = []
for cycle,extra in enumerate(extras):
if extra != 0:
arr.extend([data[cycle][extra_ch] for extra_ch in range(extra)])
propagate = np.array(arr)
stacked = np.concatenate((np.array([propagate]*stacked.shape[0]),stacked),axis=1)
else:
extras = [0,]*stacked.shape[0]
else:
stacked = data
extras = [0,]*stacked.shape[0]
assert stacked.ndim == 4, 'Input data must have dimensions CYCLE, CHANNEL, I, J'
# align between SBS channels for each cycle
aligned = stacked.copy()
if align_within_cycle:
align_it = lambda x: Align.align_within_cycle(x, window=window, upsample_factor=upsample_factor)
aligned[:, n:] = np.array([align_it(x) for x in aligned[:, n:]])
if method == 'DAPI':
# align cycles using the DAPI channel
aligned = Align.align_between_cycles(aligned, channel_index=0,
window=window, upsample_factor=upsample_factor)
elif method == 'SBS_mean':
# calculate cycle offsets using the average of SBS channels
sbs_channels = list(range(n,aligned.shape[1]))
if remove_for_cycle_alignment != None:
sbs_channels.remove(remove_for_cycle_alignment)
target = Align.apply_window(aligned[:, sbs_channels], window=window).max(axis=1)
normed = Align.normalize_by_percentile(target, q_norm=q_norm)
normed[normed > cutoff] = cutoff
offsets = Align.calculate_offsets(normed, upsample_factor=upsample_factor)
# apply cycle offsets to each channel
for channel in range(aligned.shape[1]):
if channel >= sum(extras):
aligned[:, channel] = Align.apply_offsets(aligned[:, channel], offsets)
else:
# don't apply offsets to extra channel in the cycle it was acquired
extra_idx = list(np.cumsum(extras)>channel).index(True)
extra_offsets = np.array([offsets[extra_idx],]*aligned.shape[0])
aligned[:,channel] = Align.apply_offsets(aligned[:,channel],extra_offsets)
else:
raise ValueError(f'method "{method}" not implemented')
return aligned
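# Illustrative usage sketch: aligning an unaligned SBS stack. The array shape and the
# 'SBS_mean' method below are assumptions for illustration, not pipeline defaults.
#   stack = np.random.randint(0, 2**12, size=(4, 5, 128, 128), dtype=np.uint16)
#   aligned = Snake._align_SBS(stack, method='SBS_mean', upsample_factor=2, window=2)
#   aligned.shape == stack.shape  # cycle/channel layout is preserved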
@staticmethod
def _align_by_DAPI(data_1, data_2, channel_index=0, upsample_factor=2):
"""Align the second image to the first, using the channel at position
`channel_index`. If channel_index is a tuple of length 2, specifies channels of [data_1,data_2]
to use for alignment. The first channel is usually DAPI.
"""
if isinstance(channel_index,tuple):
assert len(channel_index)==2, 'channel_index must either be an integer or a tuple of length 2'
channel_index_1,channel_index_2 = channel_index
else:
channel_index_1,channel_index_2 = (channel_index,)*2
images = data_1[channel_index_1], data_2[channel_index_2]
_, offset = ops.process.Align.calculate_offsets(images, upsample_factor=upsample_factor)
offsets = [offset] * len(data_2)
aligned = ops.process.Align.apply_offsets(data_2, offsets)
return aligned
@staticmethod
def _align_phenotype_channels(data,target,source,riders=[],upsample_factor=2, window=2, remove=False):
windowed = Align.apply_window(data[[target,source]],window)
# remove noise?
offsets = Align.calculate_offsets(windowed,upsample_factor=upsample_factor)
if not isinstance(riders,list):
riders = [riders]
full_offsets = np.zeros((data.shape[0],2))
full_offsets[[source]+riders] = offsets[1]
aligned = Align.apply_offsets(data, full_offsets)
if remove == 'target':
channel_order = list(range(data.shape[0]))
channel_order.remove(source)
channel_order.insert(target+1,source)
aligned = aligned[channel_order]
aligned = remove_channels(aligned, target)
elif remove == 'source':
aligned = remove_channels(aligned, source)
return aligned
@staticmethod
def _stack_channels(data):
arr = []
for dataset in data:
if len(dataset.shape)>2:
arr.extend([dataset[...,channel,:,:] for channel in range(dataset.shape[-3])])
else:
arr.append(dataset)
return np.stack(arr,axis=-3)
@staticmethod
def _segment_nuclei(data, threshold, area_min, area_max, smooth=1.35, radius=15):
"""Find nuclei from DAPI. Find cell foreground from aligned but unfiltered
data. Expects data to have shape (CHANNEL, I, J).
"""
if isinstance(data, list):
dapi = data[0].astype(np.uint16)
elif data.ndim == 3:
dapi = data[0].astype(np.uint16)
else:
dapi = data.astype(np.uint16)
kwargs = dict(threshold=lambda x: threshold,
area_min=area_min, area_max=area_max,
smooth=smooth, radius=radius)
# skimage precision warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
nuclei = ops.process.find_nuclei(dapi, **kwargs)
return nuclei.astype(np.uint16)
@staticmethod
def _segment_nuclei_stack(data, threshold, area_min, area_max, smooth=1.35, radius=15, n_jobs=1, backend='threading',tqdm=False):
"""Find nuclei from a nuclear stain (e.g., DAPI). Expects data to have shape (I, J)
(segments one image) or (N, I, J) (segments a series of nuclear stain images).
"""
kwargs = dict(threshold=lambda x: threshold,
area_min=area_min, area_max=area_max,
smooth=smooth, radius=radius)
if n_jobs==1:
find_nuclei = ops.utils.applyIJ(ops.process.find_nuclei)
else:
kwargs['n_jobs']=n_jobs
kwargs['tqdm']=tqdm
find_nuclei = functools.partial(ops.utils.applyIJ_parallel,ops.process.find_nuclei,backend=backend)
# skimage precision warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
nuclei = find_nuclei(data, **kwargs)
return nuclei.astype(np.uint16)
@staticmethod
def _segment_cells(data, nuclei, threshold, add_nuclei=True):
"""Segment cells from aligned data. Matches cell labels to nuclei labels.
Note that labels can be skipped, for example if cells are touching the
image boundary.
"""
if data.ndim == 4:
# no DAPI, min over cycles, mean over channels
mask = data[:, 1:].min(axis=0).mean(axis=0)
elif data.ndim == 3:
mask = np.median(data[1:], axis=0)
elif data.ndim == 2:
mask = data
else:
raise ValueError
mask = mask > threshold
# at least get nuclei shape in cells image -- helpful for mapping reads to cells
# at edge of FOV
if add_nuclei:
mask += nuclei.astype(bool)
try:
# skimage precision warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cells = ops.process.find_cells(nuclei, mask)
except ValueError:
print('segment_cells error -- no cells')
cells = nuclei
return cells
@staticmethod
def _segment_cells_robust(data, channel, nuclei, background_offset, background_quantile=0.05,
smooth=None, erosion=None, add_nuclei=True, mask_dilation=5):
# find region where all channels are valid, e.g. after applying offsets to align channels
mask = data.min(axis=0)>0
if smooth is not None:
image = skimage.filters.gaussian(data[channel],smooth,preserve_range=True).astype(data.dtype)
else:
image = data[channel]
threshold = np.quantile(image[mask],background_quantile) + background_offset
semantic = image > threshold
if add_nuclei:
semantic += nuclei.astype(bool)
if erosion is not None:
semantic[~mask] = True
semantic = skimage.morphology.binary_erosion(semantic, skimage.morphology.disk(erosion/2))
semantic[~mask] = False
if add_nuclei:
semantic += nuclei.astype(bool)
labels = ops.process.find_cells(nuclei,semantic)
def remove_border(labels, mask, dilate=mask_dilation):
mask = skimage.morphology.binary_dilation(mask,np.ones((dilate,dilate)))
remove = np.unique(labels[mask])
labels = labels.copy()
labels.flat[np.in1d(labels,remove)] = 0
return labels
return remove_border(labels,~mask)
@staticmethod
def _segment_cellpose(data, dapi_index, cyto_index, nuclei_diameter, cell_diameter, cellpose_kwargs=dict()):
from ops.cellpose import segment_cellpose_rgb#,segment_cellpose
# return segment_cellpose(data[dapi_index], data[cyto_index],
# nuclei_diameter=diameter, cell_diameter=diameter)
rgb = Snake._prepare_cellpose(data, dapi_index, cyto_index)
nuclei, cells = segment_cellpose_rgb(rgb, nuclei_diameter, cell_diameter, **cellpose_kwargs)
return nuclei, cells
@staticmethod
def _prepare_cellpose(data, dapi_index, cyto_index, logscale=True):
"""Export three-channel RGB image for use with cellpose GUI (e.g., to select
cell diameter). Nuclei are exported to blue (cellpose channel=3), cytoplasm to
green (cellpose channel=2).
Unfortunately the cellpose GUI sometimes has issues loading tif files, so this
exports to PNG, which has limited dynamic range. Cellpose performs internal
scaling based on 10th and 90th percentiles of the input.
"""
from ops.cellpose import image_log_scale
from skimage import img_as_ubyte
dapi = data[dapi_index]
cyto = data[cyto_index]
blank = np.zeros_like(dapi)
if logscale:
cyto = image_log_scale(cyto)
cyto = cyto/cyto.max() # for ubyte conversion
dapi_upper = np.percentile(dapi, 99.5)
dapi = dapi / dapi_upper
dapi[dapi > 1] = 1
red, green, blue = img_as_ubyte(blank), img_as_ubyte(cyto), img_as_ubyte(dapi)
return np.array([red, green, blue]).transpose([1, 2, 0])
# @staticmethod
# def _segment_cells_tubulin(data, nuclei, threshold, area_min, area_max, radius=15,
# method='otsu', tubulin_channel=1, remove_boundary_cells=False, **kwargs):
# """Segment cells from aligned data. Matches cell labels to nuclei labels.
# Note that labels can be skipped, for example if cells are touching the
# image boundary.
# """
# if data.ndim == 3:
# tubulin = data[tubulin_channel].astype(np.uint16)
# elif data.ndim == 2:
# tubulin = data.astype(np.uint16)
# else:
# raise ValueError('input image has more than 3 dimensions')
# kwargs = dict(threshold=threshold,
# area_min=area_min,
# area_max=area_max,
# radius=radius,
# method=method)
# kwargs.update(**kwargs)
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# mask = ops.process.find_tubulin_background(tubulin,nuclei,**kwargs)
# try:
# # skimage precision warning
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# cells = ops.process.find_cells(nuclei,mask,remove_boundary_cells=remove_boundary_cells)
# except ValueError:
# print('segment_cells error -- no cells')
# cells = nuclei
# return cells
@staticmethod
def _transform_log(data, sigma=1, skip_index=None):
"""Apply Laplacian-of-Gaussian filter from scipy.ndimage.
Use `skip_index` to skip transforming a channel (e.g., DAPI with `skip_index=0`).
"""
data = np.array(data)
loged = ops.process.log_ndi(data, sigma=sigma)
if skip_index is not None:
loged[..., skip_index, :, :] = data[..., skip_index, :, :]
return loged
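# Illustrative call, assuming `aligned` holds (CYCLE, CHANNEL, I, J) data with DAPI in
# channel 0; the LoG filter sharpens sequencing spots while DAPI is passed through.
#   loged = Snake._transform_log(aligned, sigma=1, skip_index=0)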
@staticmethod
def _compute_std(data, remove_index=None):
"""Use standard deviation to estimate sequencing read locations.
"""
if remove_index is not None:
data = remove_channels(data, remove_index)
# for 1-cycle experiments
if len(data.shape)==3:
data = data[:,None,...]
# leading_dims = tuple(range(0, data.ndim - 2))
# consensus = np.std(data, axis=leading_dims)
consensus = np.std(data, axis=0).mean(axis=0)
return consensus
@staticmethod
def _find_peaks(data, width=5, remove_index=None):
"""Find local maxima and label by difference to next-highest neighboring
pixel.
"""
if remove_index is not None:
data = remove_channels(data, remove_index)
if data.ndim == 2:
data = [data]
peaks = [ops.process.find_peaks(x, n=width)
if x.max() > 0 else x
for x in data]
peaks = np.array(peaks).squeeze()
return peaks
@staticmethod
def _max_filter(data, width, remove_index=None):
"""Apply a maximum filter in a window of `width`.
"""
import scipy.ndimage.filters
if data.ndim == 2:
data = data[None, None]
if data.ndim == 3:
data = data[None]
if remove_index is not None:
data = remove_channels(data, remove_index)
maxed = scipy.ndimage.filters.maximum_filter(data, size=(1, 1, width, width))
return maxed
@staticmethod
def _extract_bases(maxed, peaks, cells, threshold_peaks, wildcards, bases='GTAC'):
"""Find the signal intensity from `maxed` at each point in `peaks` above
`threshold_peaks`. Output is labeled by `wildcards` (e.g., well and tile) and
label at that position in integer mask `cells`.
"""
if maxed.ndim == 3:
maxed = maxed[None]
if len(bases) != maxed.shape[1]:
error = 'Sequencing {0} bases {1} but maxed data had shape {2}'
raise ValueError(error.format(len(bases), bases, maxed.shape))
# "cycle 0" is reserved for phenotyping
cycles = list(range(1, maxed.shape[0] + 1))
bases = list(bases)
values, labels, positions = (
ops.in_situ.extract_base_intensity(maxed, peaks, cells, threshold_peaks))
df_bases = ops.in_situ.format_bases(values, labels, positions, cycles, bases)
for k,v in sorted(wildcards.items()):
df_bases[k] = v
return df_bases
@staticmethod
def _call_reads(df_bases, correction_quartile=0, peaks=None, correction_only_in_cells=True, correction_by_cycle=False, subtract_channel_min=False):
"""Median correction performed independently for each tile.
Use the `correction_only_in_cells` flag to specify if correction
is based on reads within cells, or all reads.
"""
df_bases = df_bases.copy()
if df_bases is None:
return
if correction_only_in_cells:
if len(df_bases.query('cell > 0')) == 0:
return
if subtract_channel_min:
df_bases['intensity'] = df_bases['intensity'] - df_bases.groupby([WELL,TILE,CELL,READ,CHANNEL])['intensity'].transform('min')
cycles = len(set(df_bases['cycle']))
channels = len(set(df_bases['channel']))
df_reads = (df_bases
.pipe(ops.in_situ.clean_up_bases)
.pipe(ops.in_situ.do_median_call, cycles, channels=channels,
correction_only_in_cells=correction_only_in_cells,
correction_by_cycle=correction_by_cycle,
correction_quartile=correction_quartile)
)
if peaks is not None:
i, j = df_reads[['i', 'j']].values.T
df_reads['peak'] = peaks[i, j]
return df_reads
@staticmethod
def _call_cells(df_reads, df_pool=None,q_min=0):
"""Median correction performed independently for each tile.
"""
if df_reads is None:
return
if df_pool is None:
return (df_reads
.query('Q_min >= @q_min')
.pipe(ops.in_situ.call_cells))
else:
prefix_length = len(df_reads.iloc[0].barcode) # get the experimental prefix length, i.e., the number of completed SBS cycles
df_pool[PREFIX] = df_pool.apply(lambda x: x.sgRNA[:prefix_length],axis=1)
return (df_reads
.query('Q_min >= @q_min')
.pipe(ops.in_situ.call_cells_mapping,df_pool))
@staticmethod
def _extract_features(data, labels, wildcards, features=None,multichannel=False):
"""Extracts features in dictionary and combines with generic region
features.
"""
from ops.features import features_basic
features = features.copy() if features else dict()
features.update(features_basic)
if multichannel:
from ops.process import feature_table_multichannel as feature_table
else:
from ops.process import feature_table
df = feature_table(data, labels, features)
for k,v in sorted(wildcards.items()):
df[k] = v
return df
@staticmethod
def _extract_timelapse_features(data, labels, wildcards, features=None):
"""Extracts features in dictionary and combines with generic region
features.
"""
arr = []
for i, (frame, labels_frame) in enumerate(zip(np.squeeze(data), np.squeeze(labels))):
arr += [(Snake._extract_features(frame, labels_frame, wildcards, features=features)
.assign(frame=i))]
return pd.concat(arr).rename(columns={'label':'cell'})
@staticmethod
def _extract_features_bare(data, labels, features=None, wildcards=None, multichannel=False):
"""Extracts features in dictionary and combines with generic region
features.
"""
from ops.process import feature_table
features = features.copy() if features else dict()
features.update({'label': lambda r: r.label})
if multichannel:
from ops.process import feature_table_multichannel as feature_table
else:
from ops.process import feature_table
df = feature_table(data, labels, features)
if wildcards is not None:
for k,v in sorted(wildcards.items()):
df[k] = v
return df
@staticmethod
def _extract_phenotype_nuclei_cells(data_phenotype, nuclei, cells, features_n, features_c, wildcards):
if (nuclei.max() == 0) or (cells.max() == 0):
return
import ops.features
features_n = {k + '_nuclear': v for k,v in features_n.items()}
features_c = {k + '_cell': v for k,v in features_c.items()}
features_c.update({'area': lambda r: r.area})
df_n = (Snake._extract_features(data_phenotype, nuclei, wildcards, features_n)
.rename(columns={'area': 'area_nuclear'}))
df_c = (Snake._extract_features_bare(data_phenotype, cells, wildcards, features_c)
.drop(['i', 'j'], axis=1).rename(columns={'area': 'area_cell'}))
# inner join discards nuclei without corresponding cells
df = (pd.concat([df_n.set_index('label'), df_c.set_index('label')], axis=1, join='inner')
.reset_index())
return (df
.rename(columns={'label': 'cell'}))
@staticmethod
def _extract_phenotype_FR(data_phenotype, nuclei, wildcards):
"""Features for frameshift reporter phenotyped in DAPI, HA channels.
"""
from ops.features import features_frameshift
return (Snake._extract_features(data_phenotype, nuclei, wildcards, features_frameshift)
.rename(columns={'label': 'cell'}))
@staticmethod
def _extract_phenotype_FR_myc(data_phenotype, nuclei, wildcards):
"""Features for frameshift reporter phenotyped in DAPI, HA, myc channels.
"""
from ops.features import features_frameshift_myc
return (Snake._extract_features(data_phenotype, nuclei, wildcards, features_frameshift_myc)
.rename(columns={'label': 'cell'}))
@staticmethod
def _extract_phenotype_translocation(data_phenotype, nuclei, cells, wildcards):
if (nuclei.max() == 0) or (cells.max() == 0):
return
import ops.features
features_n = ops.features.features_translocation_nuclear
features_c = ops.features.features_translocation_cell
features_n = {k + '_nuclear': v for k,v in features_n.items()}
features_c = {k + '_cell': v for k,v in features_c.items()}
df_n = (Snake._extract_features(data_phenotype, nuclei, wildcards, features_n)
.rename(columns={'area': 'area_nuclear'}))
df_c = (Snake._extract_features(data_phenotype, cells, wildcards, features_c)
.drop(['i', 'j'], axis=1).rename(columns={'area': 'area_cell'}))
# inner join discards nuclei without corresponding cells
df = (pd.concat([df_n.set_index('label'), df_c.set_index('label')], axis=1, join='inner')
.reset_index())
return (df
.rename(columns={'label': 'cell'}))
@staticmethod
def _extract_phenotype_translocation_live(data, nuclei, wildcards):
def _extract_phenotype_translocation_simple(data, nuclei, wildcards):
import ops.features
features = ops.features.features_translocation_nuclear_simple
return (Snake._extract_features(data, nuclei, wildcards, features)
.rename(columns={'label': 'cell'}))
extract = _extract_phenotype_translocation_simple
arr = []
for i, (frame, nuclei_frame) in enumerate(zip(data, nuclei)):
arr += [extract(frame, nuclei_frame, wildcards).assign(frame=i)]
return pd.concat(arr)
@staticmethod
def _extract_phenotype_translocation_ring(data_phenotype, nuclei, wildcards, width=3):
selem = np.ones((width, width))
perimeter = skimage.morphology.dilation(nuclei, selem)
perimeter[nuclei > 0] = 0
inside = skimage.morphology.erosion(nuclei, selem)
inner_ring = nuclei.copy()
inner_ring[inside > 0] = 0
return (Snake._extract_phenotype_translocation(data_phenotype, inner_ring, perimeter, wildcards)
.rename(columns={'label': 'cell'}))
@staticmethod
def _extract_phenotype_minimal(data_phenotype, nuclei, wildcards):
return (Snake._extract_features(data_phenotype, nuclei, wildcards, dict())
.rename(columns={'label': 'cell'}))
@staticmethod
def _extract_phenotype_geom(labels, wildcards):
from ops.features import features_geom
return Snake._extract_features(labels, labels, wildcards, features_geom)
@staticmethod
def _extract_simple_nuclear_morphology(data_phenotype, nuclei, wildcards):
import ops.morphology_features
df = (Snake._extract_features(data_phenotype, nuclei, wildcards, ops.morphology_features.features_nuclear)
.rename(columns={'label':'cell'})
)
return df
@staticmethod
def _extract_phenotype_cp_old(data_phenotype, nuclei, cells, wildcards, nucleus_channels='all', cell_channels='all', channel_names=['dapi','tubulin','gh2ax','phalloidin']):
if nucleus_channels == 'all':
try:
nucleus_channels = list(range(data_phenotype.shape[-3]))
except:
nucleus_channels = [0]
if cell_channels == 'all':
try:
cell_channels = list(range(data_phenotype.shape[-3]))
except:
cell_channels = [0]
dfs = []
with warnings.catch_warnings():
warnings.simplefilter("ignore")
data_phenotype = data_phenotype.astype(np.uint16)
# nucleus shape
dfs.append(Snake._extract_features(nuclei,nuclei,wildcards,ops.cp_emulator.shape_features)
.rename(columns=ops.cp_emulator.shape_columns)
.set_index('label')
.rename(columns = lambda x: 'nucleus_'+x if x not in wildcards.keys() else x)
)
# cell shape
dfs.append(Snake._extract_features_bare(cells,cells,ops.cp_emulator.shape_features)
.rename(columns=ops.cp_emulator.shape_columns)
.set_index('label')
.add_prefix('cell_')
)
# nucleus grayscale channel features
dfs.extend([(Snake._extract_features_bare(data_phenotype[...,channel,:,:],nuclei,ops.cp_emulator.grayscale_features)
.rename(columns=ops.cp_emulator.grayscale_columns)
.set_index('label')
.add_prefix(f'nucleus_{channel_names[channel]}_')
)
for channel in nucleus_channels]
)
# cell grayscale channel features
dfs.extend([(Snake._extract_features_bare(data_phenotype[...,channel,:,:],cells,ops.cp_emulator.grayscale_features)
.rename(columns=ops.cp_emulator.grayscale_columns)
.set_index('label')
.add_prefix(f'cell_{channel_names[channel]}_')
)
for channel in cell_channels]
)
# generate correlation column names
## nucleus
nucleus_correlation_columns = {
'colocalization_{}'.format(inner_num+outer_num*len(ops.cp_emulator.colocalization_columns))
:col.format(first=channel_names[first],second=channel_names[second])
for outer_num,(first,second) in enumerate(combinations(nucleus_channels,2))
for inner_num,col in enumerate(ops.cp_emulator.colocalization_columns)
}
nucleus_correlation_columns.update({
'correlation_{}'.format(num)
:ops.cp_emulator.correlation_columns[0].format(first=channel_names[first],second=channel_names[second])
for num,(first,second) in enumerate(combinations(nucleus_channels,2))
})
nucleus_correlation_columns.update({
'lstsq_slope_{}'.format(num)
:ops.cp_emulator.correlation_columns[1].format(first=channel_names[first],second=channel_names[second])
for num,(first,second) in enumerate(combinations(nucleus_channels,2))
})
## cell
cell_correlation_columns = {
'colocalization_{}'.format(inner_num+outer_num*len(ops.cp_emulator.colocalization_columns))
:col.format(first=channel_names[first],second=channel_names[second])
for outer_num,(first,second) in enumerate(combinations(cell_channels,2))
for inner_num,col in enumerate(ops.cp_emulator.colocalization_columns)
}
cell_correlation_columns.update({
'correlation_{}'.format(num)
:ops.cp_emulator.correlation_columns[0].format(first=channel_names[first],second=channel_names[second])
for num,(first,second) in enumerate(combinations(cell_channels,2))
})
cell_correlation_columns.update({
'lstsq_slope_{}'.format(num)
:ops.cp_emulator.correlation_columns[1].format(first=channel_names[first],second=channel_names[second])
for num,(first,second) in enumerate(combinations(cell_channels,2))
})
# nucleus channel correlations
dfs.append(Snake._extract_features_bare(data_phenotype[...,nucleus_channels,:,:],nuclei,ops.cp_emulator.correlation_features)
.rename(columns=nucleus_correlation_columns)
.set_index('label')
.add_prefix('nucleus_')
)
# cell channel correlations
dfs.append(Snake._extract_features_bare(data_phenotype[...,cell_channels,:,:],cells,ops.cp_emulator.correlation_features)
.rename(columns=cell_correlation_columns)
.set_index('label')
.add_prefix('cell_')
)
# nucleus neighbors
dfs.append(ops.cp_emulator.neighbor_measurements(nuclei,distances=[1])
.set_index('label')
.add_prefix('nucleus_')
)
# cell neighbors
dfs.append(ops.cp_emulator.neighbor_measurements(cells,distances=[1])
.set_index('label')
.add_prefix('cell_')
)
return pd.concat(dfs,axis=1,join='outer',sort=True).reset_index()
@staticmethod
def _extract_phenotype_cp_ch(data_phenotype, nuclei, cells, wildcards, nucleus_channels='all', cell_channels='all', channel_names=['dapi','tubulin','gh2ax','phalloidin']):
from functools import partial
if nucleus_channels == 'all':
try:
nucleus_channels = list(range(data_phenotype.shape[-3]))
except:
nucleus_channels = [0]
if cell_channels == 'all':
try:
cell_channels = list(range(data_phenotype.shape[-3]))
except:
cell_channels = [0]
dfs = []
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# data_phenotype = data_phenotype.astype(np.uint16)
nucleus_features = {}
nucleus_columns = {}
for ch in nucleus_channels:
nucleus_columns.update({k.format(channel=channel_names[ch]):v.format(channel=channel_names[ch])
for k,v in ops.cp_emulator.grayscale_columns_ch.items()
})
for name,func in ops.cp_emulator.grayscale_features_ch.items():
nucleus_features[f'{channel_names[ch]}_{name}'] = partial(func,ch=ch)
for first,second in combinations(list(range(len(nucleus_channels))),2):
nucleus_columns.update({k.format(first=channel_names[first],second=channel_names[second]):
v.format(first=channel_names[first],second=channel_names[second])
for k,v in ops.cp_emulator.colocalization_columns_ch.items()
})
for name,func in ops.cp_emulator.correlation_features_ch.items():
nucleus_features[f'{name}_{channel_names[first]}_{channel_names[second]}'] = partial(func,ch1=first,ch2=second)
nucleus_features.update(ops.cp_emulator.shape_features)
nucleus_columns.update(ops.cp_emulator.shape_columns)
cell_features = {}
cell_columns = {}
for ch in cell_channels:
cell_columns.update({k.format(channel=channel_names[ch]):v.format(channel=channel_names[ch])
for k,v in ops.cp_emulator.grayscale_columns_ch.items()
})
for name,func in ops.cp_emulator.grayscale_features_ch.items():
cell_features[f'{channel_names[ch]}_{name}'] = partial(func,ch=ch)
for first,second in combinations(list(range(len(cell_channels))),2):
cell_columns.update({k.format(first=channel_names[first],second=channel_names[second]):
v.format(first=channel_names[first],second=channel_names[second])
for k,v in ops.cp_emulator.colocalization_columns_ch.items()
})
for name,func in ops.cp_emulator.correlation_features_ch.items():
cell_features[f'{name}_{channel_names[first]}_{channel_names[second]}'] = partial(func,ch1=first,ch2=second)
cell_features.update(ops.cp_emulator.shape_features)
cell_columns.update(ops.cp_emulator.shape_columns)
# nucleus features
dfs.append(Snake._extract_features(data_phenotype[...,nucleus_channels,:,:],
nuclei,wildcards,nucleus_features)
.rename(columns=nucleus_columns)
.set_index('label')
.rename(columns = lambda x: 'nucleus_'+x if x not in wildcards.keys() else x)
)
# cell features
dfs.append(Snake._extract_features_bare(data_phenotype[...,cell_channels,:,:],
cells,cell_features)
.rename(columns=cell_columns)
.set_index('label')
.add_prefix('cell_')
)
# nucleus neighbors
dfs.append(ops.cp_emulator.neighbor_measurements(nuclei,distances=[1])
.set_index('label')
.add_prefix('nucleus_')
)
# cell neighbors
dfs.append(ops.cp_emulator.neighbor_measurements(cells,distances=[1])
.set_index('label')
.add_prefix('cell_')
)
return | pd.concat(dfs,axis=1,join='outer',sort=True) | pandas.concat |
import gym
import torch
import random
import numpy as np
from gym.spaces.box import Box
import contextlib
import csv
import json
import os
import os.path as osp
import time
from abc import ABC, abstractmethod
from collections import deque
from glob import glob
import numpy as np
from gym.core import Wrapper
from gym.spaces import Box, Dict, Discrete
from procgen import ProcgenEnv
class ProcgenVecEnvCustom():
def __init__(self,
env_name,
num_levels,
mode,
start_level,
paint_vel_info=False,
num_envs=32,
normalize_rewards=True,
device='cuda'):
env = ProcgenEnv(num_envs=num_envs,
env_name=env_name,
num_levels=num_levels,
start_level=start_level,
paint_vel_info=paint_vel_info,
distribution_mode=mode)
self.observation_space = Box(shape=(3, 64, 64), low=0, high=255)
self.action_space = Discrete(15)
self.num_envs = num_envs
self.device = device
env = VecExtractDictObs(env, "rgb")
env = VecMonitor(
venv=env,
filename=None,
keep_buf=100,
)
if normalize_rewards:
env = VecNormalize(venv=env, ob=False)
self.env = env
# self.env = EpisodeRewardWrapper(env)
self._max_episode_steps = 10_000
def reset(self):
o = self.env.reset()
o = (torch.from_numpy(o).to(self.device)).permute(0,3,1,2)
return o
def step(self, action):
o, r, x, info, = self.env.step(action.cpu().detach().numpy().squeeze(1))
o = (torch.from_numpy(o).to(self.device)).permute(0,3,1,2)
r = torch.from_numpy(r).unsqueeze(dim=1).float()
return o, r, x, info
def close(self):
self.env.close()
class AlreadySteppingError(Exception):
"""
Raised when an asynchronous step is running while
step_async() is called again.
"""
def __init__(self):
msg = 'already running an async step'
Exception.__init__(self, msg)
class NotSteppingError(Exception):
"""
Raised when an asynchronous step is not running but
step_wait() is called.
"""
def __init__(self):
msg = 'not running an async step'
Exception.__init__(self, msg)
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
each observation becomes an batch of observations, and expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
class VecEnvWrapper(VecEnv):
"""
An environment wrapper that applies to an entire batch
of environments at once.
"""
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
super().__init__(num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self, mode='human'):
return self.venv.render(mode=mode)
def get_images(self):
return self.venv.get_images()
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError(
"attempted to get missing private attribute '{}'".format(name))
return getattr(self.venv, name)
class VecEnvObservationWrapper(VecEnvWrapper):
@abstractmethod
def process(self, obs):
pass
def reset(self):
obs = self.venv.reset()
return self.process(obs)
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
return self.process(obs), rews, dones, infos
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class VecNormalize(VecEnvWrapper):
"""
A vectorized wrapper that normalizes the observations
and returns from an environment.
"""
def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8, use_tf=False):
VecEnvWrapper.__init__(self, venv)
if use_tf:
from baselines.common.running_mean_std import TfRunningMeanStd
self.ob_rms = TfRunningMeanStd(
shape=self.observation_space.shape, scope='ob_rms') if ob else None
self.ret_rms = TfRunningMeanStd(
shape=(), scope='ret_rms') if ret else None
else:
self.ob_rms = RunningMeanStd(
shape=self.observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var +
self.epsilon), -self.cliprew, self.cliprew)
self.ret[news] = 0.
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var +
self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
self.ret = np.zeros(self.num_envs)
obs = self.venv.reset()
return self._obfilt(obs)
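# In short: rewards are rescaled by the running standard deviation of the discounted
# return R_t = gamma * R_{t-1} + r_t, i.e. r_hat_t = clip(r_t / sqrt(Var[R] + epsilon),
# -cliprew, cliprew); with ob=True, observations are standardized against ob_rms and
# clipped to +/- clipob in the same way.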
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
self.mean, self.var, self.count = update_mean_var_count_from_moments(
self.mean, self.var, self.count, batch_mean, batch_var, batch_count)
def update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
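# Sanity-check sketch for the parallel moment update above (Chan et al.'s algorithm):
# merging the moments of two halves of a sample should reproduce the full-sample moments
# up to the epsilon used to seed the running count. The helper below is illustrative only.
def _check_running_mean_std_merge():
    x = np.random.randn(1000, 3)
    rms = RunningMeanStd(shape=(3,))
    rms.update(x[:500])
    rms.update(x[500:])
    assert np.allclose(rms.mean, x.mean(axis=0), atol=1e-6)
    assert np.allclose(rms.var, x.var(axis=0), atol=1e-3)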
class VecMonitor(VecEnvWrapper):
def __init__(self, venv, filename=None, keep_buf=0, info_keywords=()):
VecEnvWrapper.__init__(self, venv)
self.eprets = None
self.eplens = None
self.epcount = 0
self.tstart = time.time()
if filename:
self.results_writer = ResultsWriter(filename, header={'t_start': self.tstart},
extra_keys=info_keywords)
else:
self.results_writer = None
self.info_keywords = info_keywords
self.keep_buf = keep_buf
if self.keep_buf:
self.epret_buf = deque([], maxlen=keep_buf)
self.eplen_buf = deque([], maxlen=keep_buf)
def reset(self):
obs = self.venv.reset()
self.eprets = np.zeros(self.num_envs, 'f')
self.eplens = np.zeros(self.num_envs, 'i')
return obs
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
self.eprets += rews
self.eplens += 1
newinfos = list(infos[:])
for i in range(len(dones)):
if dones[i]:
info = infos[i].copy()
ret = self.eprets[i]
eplen = self.eplens[i]
epinfo = {'r': ret, 'l': eplen, 't': round(
time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
info['episode'] = epinfo
if self.keep_buf:
self.epret_buf.append(ret)
self.eplen_buf.append(eplen)
self.epcount += 1
self.eprets[i] = 0
self.eplens[i] = 0
if self.results_writer:
self.results_writer.write_row(epinfo)
newinfos[i] = info
return obs, rews, dones, newinfos
class Monitor(Wrapper):
EXT = "monitor.csv"
f = None
def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
if filename:
self.results_writer = ResultsWriter(filename,
header={"t_start": time.time(
), 'env_id': env.spec and env.spec.id},
extra_keys=reset_keywords + info_keywords
)
else:
self.results_writer = None
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
# extra info about the current episode, that was passed in during reset()
self.current_reset_info = {}
def reset(self, **kwargs):
self.reset_state()
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError(
'Expected you to pass kwarg %s into reset' % k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def reset_state(self):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError(
"Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.update(ob, rew, done, info)
return (ob, rew, done, info)
def update(self, ob, rew, done, info):
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {"r": round(eprew, 6), "l": eplen,
"t": round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
if self.results_writer:
self.results_writer.write_row(epinfo)
assert isinstance(info, dict)
if isinstance(info, dict):
info['episode'] = epinfo
self.total_steps += 1
def close(self):
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
class LoadMonitorResultsError(Exception):
pass
class ResultsWriter(object):
def __init__(self, filename, header='', extra_keys=()):
self.extra_keys = extra_keys
assert filename is not None
if not filename.endswith(Monitor.EXT):
if osp.isdir(filename):
filename = osp.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
if isinstance(header, dict):
header = '# {} \n'.format(json.dumps(header))
self.f.write(header)
self.logger = csv.DictWriter(
self.f, fieldnames=('r', 'l', 't')+tuple(extra_keys))
self.logger.writeheader()
self.f.flush()
def write_row(self, epinfo):
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
class VecExtractDictObs(VecEnvObservationWrapper):
def __init__(self, venv, key):
self.key = key
super().__init__(venv=venv,
observation_space=venv.observation_space.spaces[self.key])
def process(self, obs):
return obs[self.key]
def get_monitor_files(dir):
return glob(osp.join(dir, "*" + Monitor.EXT))
def load_results(dir):
import pandas
monitor_files = (
glob(osp.join(dir, "*monitor.json")) +
glob(osp.join(dir, "*monitor.csv"))) # get both csv and (old) json files
if not monitor_files:
raise LoadMonitorResultsError(
"no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
dfs = []
headers = []
for fname in monitor_files:
with open(fname, 'rt') as fh:
if fname.endswith('csv'):
firstline = fh.readline()
if not firstline:
continue
assert firstline[0] == '#'
header = json.loads(firstline[1:])
df = pandas.read_csv(fh, index_col=None)
headers.append(header)
elif fname.endswith('json'): # Deprecated json format
episodes = []
lines = fh.readlines()
header = json.loads(lines[0])
headers.append(header)
for line in lines[1:]:
episode = json.loads(line)
episodes.append(episode)
df = pandas.DataFrame(episodes)
else:
assert 0, 'unreachable'
df['t'] += header['t_start']
dfs.append(df)
df = | pandas.concat(dfs) | pandas.concat |
import numpy as np
from numpy.core.numeric import _rollaxis_dispatcher
import pandas as pd
from pymbar import BAR as BAR_
from pymbar import MBAR as MBAR_
from alchemlyb.estimators import MBAR
from sklearn.base import BaseEstimator
import copy
import re
import itertools
import logging
logger = logging.getLogger(__name__)
class Estimators():
"""
Return the estimated binding free energy (dG).
Computes the dG between state A and state B using three different energy estimators:
Zwanzig, thermodynamic integration (TI), or Bennett acceptance ratio (BAR).
"""
def Zwanzig(dEs,steps):
"""
Return the estimated binding free energy using the Zwanzig estimator.
Computes the binding free energy (dG) from a molecular dynamics simulation
between state A and state B using the Zwanzig (exponential averaging) estimator.
Parameters
----------
dEs : Pandas Dataframe
contains the reduced potential (dE) between the states.
steps : integer
the number of steps to be included in the calculation; set to "None" if all steps are needed.
Returns
---------
Zwanzig_df : Pandas Dataframe
contains the binding free energy (dG) between the states.
Examples
--------
>>> Zwanzig(dEs,None)
>>> Zwanzig(dEs,1000)
"""
dEs_df=pd.DataFrame(-0.592*np.log(np.mean(np.exp(-dEs.iloc[:steps]/0.592))))
Lambdas=[]
dGF=[]
dGF_sum=[]
dGR=[]
dGR_sum=[]
dG_Average=[]
dGR.append(0.0)
dG_Average.append(0.0)
for i in range(1,len(dEs_df.index),2):
Lambdas.append(re.split('_|-',dEs_df.index[i-1])[1])
dGF.append(dEs_df.iloc[i,0])
dGR.append(dEs_df.iloc[i-1,0])
Lambdas.append(re.split('_|-',dEs_df.index[-1])[1])
dGF.append(0.0)
dGF=dGF[::-1]
for i in range(len(dGF)):
dGF_sum.append(sum(dGF[:i+1]))
dGR_sum.append(sum(dGR[:i+1]))
dG_average_raw=((pd.DataFrame(dGF[1:]))-pd.DataFrame(dGR[1:][::-1]))/2
for i in range(len(list(dG_average_raw.values))):
dG_Average.append(np.sum(dG_average_raw.values[:i+1]))
Zwanzig_df=pd.DataFrame.from_dict({"Lambda":Lambdas,"dG_Forward":dGF,"SUM_dG_Forward":dGF_sum,"dG_Reverse":dGR[::-1],"SUM_dG_Reverse":dGR_sum[::-1],"dG_Average":dG_Average})
Zwanzig_Final_dG = Zwanzig_df['dG_Average'].iloc[-1]
logger.info('Final DG computed from Zwanzig estimator: ' +str(Zwanzig_Final_dG))
return Zwanzig_df, Zwanzig_Final_dG
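    def _example_zwanzig_usage():
        """Hedged usage sketch (added, not part of the original estimator set).
        The Zwanzig routine above recovers lambdas with re.split('_|-', ...), so dEs is
        assumed to be indexed by labels such as 'dE_0.50-0.45'. 'dE.csv' is a hypothetical
        input file holding that reduced-potential matrix."""
        dEs = pd.read_csv("dE.csv", index_col=0)
        Zwanzig_df, dG = Estimators.Zwanzig(dEs, steps=None)
        return dG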
def Create_df_TI(State_A_df, State_B_df):
"""
create the input dataframe needed for the Thermodynamic Integration (TI) function.
Parameters
----------
State_A_df : Pandas DataFrame for state A energies
State_B_df : Pandas DataFrame for state B energies
----------
Returns
----------
dU_dH_df : Pandas DataFrame
"""
dU_dH_df=(pd.DataFrame({"lambda":State_A_df["Lambda"],"fep":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('lambda')
dU_dH_df.reset_index(drop=True,inplace=True)
dU_dH_df.index.names = ['time']
dU_dH_df.set_index(['lambda'], append=True,inplace=True)
return dU_dH_df
def TI(State_A_df,State_B_df,steps):
"""
Return the estimated binding free energy using Thermodynamic integration (TI) estimator.
Compute free energy differences between each state by integrating
dHdl across lambda values.
Parameters
----------
dHdl : Pandas DataFrame
----------
Returns
----------
delta_f_ : DataFrame
The estimated dimensionless free energy difference between each state.
d_delta_f_ : DataFrame
The estimated statistical uncertainty (one standard deviation) in
dimensionless free energy differences.
states_ : list
Lambda states for which free energy differences were obtained.
TI : float
The free energy difference between state 0 and state 1.
"""
        if steps is not None:
Energies_df=(pd.DataFrame({"lambda":State_A_df["Lambda"],"fep":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('lambda')
Energies_df=pd.DataFrame.from_dict(dict(Energies_df.groupby('lambda',sort=False)['fep'].apply(list)),orient='index')
Energies_df=Energies_df.transpose()
Energies_df=Energies_df.iloc[:steps]
dfl=pd.DataFrame(columns=['lambda','fep'])
dU_dH_df=pd.DataFrame(columns=['lambda','fep'])
for state in range (len(Energies_df.columns)):
dfl=pd.DataFrame(columns=['lambda','fep'])
dfl['fep']=Energies_df.iloc[:,state]
dfl['lambda']=Energies_df.columns.values[state]
dU_dH_df=dU_dH_df.append(dfl)
else:
dU_dH_df=(pd.DataFrame({"lambda":State_A_df["Lambda"],"fep":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('lambda')
dU_dH_df.reset_index(drop=True,inplace=True)
dU_dH_df.index.names = ['time']
dU_dH_df.set_index(['lambda'], append=True,inplace=True)
# dU_dH_df=(pd.DataFrame({"lambda":State_A_df["Lambda"][:steps],"fep":State_B_df["Q_sum"][:steps] - State_A_df["Q_sum"][:steps] })).sort_values('lambda')
# dU_dH_df.reset_index(drop=True,inplace=True)
# dU_dH_df.index.names = ['time']
# dU_dH_df.set_index(['lambda'], append=True,inplace=True)
dHdl=dU_dH_df
# sort by state so that rows from same state are in contiguous blocks,
# and adjacent states are next to each other
dHdl = dHdl.sort_index(level=dHdl.index.names[1:])
# obtain the mean and variance of the mean for each state
# variance calculation assumes no correlation between points
# used to calculate mean
means = dHdl.mean(level=dHdl.index.names[1:])
variances = np.square(dHdl.sem(level=dHdl.index.names[1:]))
# get the lambda names
l_types = dHdl.index.names[1:]
# obtain vector of delta lambdas between each state
dl = means.reset_index()[means.index.names[:]].diff().iloc[1:].values
# apply trapezoid rule to obtain DF between each adjacent state
deltas = (dl * (means.iloc[:-1].values + means.iloc[1:].values)/2).sum(axis=1)
# build matrix of deltas between each state
adelta = np.zeros((len(deltas)+1, len(deltas)+1))
ad_delta = np.zeros_like(adelta)
for j in range(len(deltas)):
out = []
dout = []
for i in range(len(deltas) - j):
out.append(deltas[i] + deltas[i+1:i+j+1].sum())
# Define additional zero lambda
a = [0.0] * len(l_types)
# Define dl series' with additional zero lambda on the left and right
dll = np.insert(dl[i:i + j + 1], 0, [a], axis=0)
dlr = np.append(dl[i:i + j + 1], [a], axis=0)
# Get a series of the form: x1, x1 + x2, ..., x(n-1) + x(n), x(n)
dllr = dll + dlr
# Append deviation of free energy difference between state i and i+j+1
dout.append((dllr ** 2 * variances.iloc[i:i + j + 2].values / 4).sum(axis=1).sum())
adelta += np.diagflat(np.array(out), k=j+1)
ad_delta += np.diagflat(np.array(dout), k=j+1)
# yield standard delta_f_ free energies between each state
delta_f_ = pd.DataFrame(adelta - adelta.T,
columns=means.index.values,
index=means.index.values)
# yield standard deviation d_delta_f_ between each state
d_delta_f_ = pd.DataFrame(np.sqrt(ad_delta + ad_delta.T),
columns=variances.index.values,
index=variances.index.values)
states_ = means.index.values.tolist()
TI=( delta_f_.loc[0.00, 1.00])
return delta_f_ , TI
def Create_df_BAR_MBAR(State_A_df, State_B_df):
"""
Create the input dataframe needed for the Bennett Acceptance Ratio (BAR) and multistate Bennett Acceptance Ratio (MBAR) estimators.
Parameters
----------
State_A_df : Pandas DataFrame for state A energies
State_B_df : Pandas DataFrame for state B energies
----------
Returns
----------
u_nk_df : Pandas DataFrame
"""
Energies_df=(pd.DataFrame({"State_A_Lambda":State_A_df["Lambda"],"State_A_G":State_A_df["Q_sum"] ,"State_B_Lambda":State_B_df["Lambda"],"State_B_G":State_B_df["Q_sum"],"E":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('State_A_Lambda')
State_A_Energies_df=pd.DataFrame.from_dict(dict(Energies_df.groupby('State_A_Lambda',sort=False)['State_A_G'].apply(list)),orient='index')
State_A_Energies_df=State_A_Energies_df.transpose()
State_B_Energies_df=pd.DataFrame.from_dict(dict(Energies_df.groupby('State_B_Lambda',sort=False)['State_B_G'].apply(list)),orient="index")
State_B_Energies_df=State_B_Energies_df.transpose()
lambdas_list_A=list(State_A_Energies_df.columns)
lambdas_list_B=list(State_B_Energies_df.columns)
time= [i for i in range(len(State_A_Energies_df))]
lambdas_df=[i for i in State_A_Energies_df.columns]
States={i:[] for i in range(len(lambdas_list_A))}
States_dicts={i:[] for i in range(len(lambdas_list_A))}
for i in range(len(State_A_Energies_df.columns)):
State_A_Energies=State_A_Energies_df.iloc[:,[i]]
State_A_Energies.columns=["0"]
State_A_Lambda_float=State_A_Energies_df.columns[i]
State_B_Energies=State_B_Energies_df.iloc[:,[i]]
State_B_Energies.columns=["0"]
State_B_Lambda_float=State_B_Energies_df.columns[i]
E0=State_A_Energies*State_A_Lambda_float+State_B_Energies*State_B_Lambda_float
for x in range(len(lambdas_list_A)):
E1=State_A_Energies*lambdas_list_A[x]+State_B_Energies*lambdas_list_B[x]
dE=E1-E0
dE=dE.values.tolist()
dE=list(itertools.chain(*dE))
States_dicts[i].append(dE)
for i in range(len(States_dicts)):
States[i]=list(itertools.chain(*States_dicts[i]))
u_nk_df=pd.DataFrame.from_dict(States)
u_nk_df.columns=lambdas_list_A
lambdas_df=lambdas_df*len(State_A_Energies_df)
lambdas_df.sort()
u_nk_df['time']=time*len(State_A_Energies_df.columns)
u_nk_df['fep-lambda']=lambdas_df
u_nk_df=u_nk_df.astype('float')
u_nk_df.set_index(['time'] ,append=False,inplace=True)
u_nk_df.set_index(['fep-lambda'], append=True,inplace=True)
u_nk_df.columns= u_nk_df.columns.astype('float')
u_nk_df.dropna(axis=0,inplace=True)
return u_nk_df,States_dicts,State_A_Energies_df
def Create_df_dG_BAR(States_dicts,State_A_Energies_df,steps):
"""
        Create the input dataframe needed for the Bennett Acceptance Ratio (BAR) estimator and calculate the free energy.
        Parameters
        ----------
        States_dicts : dict of the reduced potential (dE) lists for each lambda state
State_A_Energies_df : Pandas DataFrame for state A energies
steps : Integer
maximum number of steps to use
----------
Returns
----------
BAR_dG : float
"""
States_dicts2=copy.deepcopy(States_dicts)
States_dicts3={}
lambdas_list_A=list(State_A_Energies_df.columns)
time = [i for i in range(len(State_A_Energies_df))]
lambdas_df=lambdas_list_A
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_dicts2[x][i]=States_dicts[x][i][:steps]
for i in range(len(States_dicts2)):
States_dicts3[i]=list(itertools.chain(*States_dicts2[i]))
u_nk_df=pd.DataFrame.from_dict(States_dicts3)
u_nk_df.columns=lambdas_list_A
lambdas_df=lambdas_df*len(State_A_Energies_df.iloc[:steps])
lambdas_df.sort()
u_nk_df['time']=time[:steps]*len(State_A_Energies_df.columns)
u_nk_df['fep-lambda']=lambdas_df
u_nk_df=u_nk_df.astype('float')
u_nk_df.set_index(['time'] ,append=False,inplace=True)
u_nk_df.set_index(['fep-lambda'], append=True,inplace=True)
u_nk_df.columns= u_nk_df.columns.astype('float')
u_nk_df.dropna(axis=0,inplace=True)
BAR_df=BAR().fit(u_nk_df)
BAR_dG = BAR_df.delta_f_.loc[0.00, 1.00]
return BAR_dG
def Create_df_dG_MBAR(States_dicts,State_A_Energies_df,steps):
"""
        Create the input dataframe needed for the multistate Bennett Acceptance Ratio (MBAR) estimator and calculate the free energy.
        Parameters
        ----------
        States_dicts : dict of the reduced potential (dE) lists for each lambda state
State_A_Energies_df : Pandas DataFrame for state A energies
steps : Integer
maximum number of steps to use
----------
Returns
----------
MBAR_dG : float
"""
States_length=[]
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_length.append(len(States_dicts[x][i]))
if min(States_length)==max(States_length):
States_dicts2=copy.deepcopy(States_dicts)
else:
            print("energy files don't have the same length",'min',min(States_length),'max',max(States_length))
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_dicts[x][i]=States_dicts[x][i][:min(States_length)]
States_dicts2=copy.deepcopy(States_dicts)
States_dicts3={}
lambdas_list_A=list(State_A_Energies_df.columns)
time = [i for i in range(len(State_A_Energies_df))]
lambdas_df=lambdas_list_A
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_dicts2[x][i]=States_dicts[x][i][:steps]
for i in range(len(States_dicts2)):
States_dicts3[i]=list(itertools.chain(*States_dicts2[i]))
u_nk_df=pd.DataFrame.from_dict(States_dicts3)
u_nk_df.columns=lambdas_list_A
lambdas_df=lambdas_df*len(State_A_Energies_df.iloc[:steps])
lambdas_df.sort()
u_nk_df['time']=time[:steps]*len(State_A_Energies_df.columns)
u_nk_df['fep-lambda']=lambdas_df
u_nk_df=u_nk_df.astype('float')
u_nk_df.set_index(['time'] ,append=False,inplace=True)
u_nk_df.set_index(['fep-lambda'], append=True,inplace=True)
u_nk_df.columns= u_nk_df.columns.astype('float')
u_nk_df.dropna(axis=0,inplace=True)
MBAR_df= MBAR().fit(u_nk_df)
MBAR_dG = MBAR_df.delta_f_.loc[0.00, 1.00]
return MBAR_dG
def Convergence(df1,df2,Estimator,StepsChunk_Int,ReplicatiesCount_Int,EnergyOutputInterval_Int):
# the last and first steps are not included in the reading
"""
Convergence analysis
        Return a dataframe containing the computed free energy dG at different step intervals using 3 different energy estimators:
        Zwanzig, Thermodynamic Integration (TI), or Bennett Acceptance Ratio (BAR).
Parameters
----------
        df1, df2 : Pandas DataFrame
            Contain the energies / dEs between the states
        Estimator : function
            The free energy estimating method (Zwanzig, TI or BAR)
        StepsChunk_Int: integer
            The number of steps (fs) to be used.
        ReplicatiesCount_Int: integer
            The number of replicates used.
        EnergyOutputInterval_Int: integer
            The interval at which the molecular dynamics simulation software
            writes the energies.
----------
Returns
----------
Convergence_df : Pandas DataFrame
Contains the computed dG at each interval.
Examples
--------
>>> Convergence(dEs,Zwanzig,1000,1,10)
>>> Convergence(dEs,TI,10000,3,10)
"""
if isinstance(df1, pd.DataFrame) and isinstance(df2, pd.DataFrame) :
dGs_Lst=[Estimator(df1,df2,steps_limit)[1] for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,int((len(df1)/len(df1['Lambda'].unique())))+1,StepsChunk_Int*ReplicatiesCount_Int)]
StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,int((len(df1)/len(df1['Lambda'].unique())))+1,StepsChunk_Int*ReplicatiesCount_Int)]
elif isinstance(df2, pd.DataFrame) and not isinstance(df1, pd.DataFrame):
dGs_Lst=[Estimator(df1,df2,steps_limit) for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df2)+1,StepsChunk_Int*ReplicatiesCount_Int)]
StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df2)+1,StepsChunk_Int*ReplicatiesCount_Int)]
else:
dGs_Lst=[Estimator(df1,steps_limit)[1] for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df1)+1,StepsChunk_Int*ReplicatiesCount_Int)]
StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df1)+1,StepsChunk_Int*ReplicatiesCount_Int)]
#StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df1)+1,StepsChunk_Int*ReplicatiesCount_Int)]
Convergence_df=pd.DataFrame({'Number of Steps':StepsChunk_Lst, 'dG':dGs_Lst })
return Convergence_df
def Zwanzig_matrix_AI(dEs,steps):
"""
Development in Progress.
"""
dEs_df=pd.DataFrame(-0.592*np.log(np.mean(np.exp(-dEs.iloc[:None]/0.592))))
Lambdas_F=[]
Lambdas_R=[]
Lambdas=[]
dGF=[]
dGF_sum=[]
dGR=[]
dGR_sum=[]
dG_Average=[]
dGR.append(0.0)
dG_Average.append(0.0)
Lambdas_F.append((re.split('_|-',dEs_df.index[-1])[0])+'_'+(re.split('_|-',dEs_df.index[-1])[0]))
for i in range(1,len(dEs_df.index),2):
Lambdas.append(re.split('_|-',dEs_df.index[i-1])[1])
Lambdas_R.append((re.split('_|-',dEs_df.index[i])[1])+'_'+(re.split('_|-',dEs_df.index[i])[3]))
Lambdas_F.append((re.split('_|-',dEs_df.index[i-1])[1])+'_'+(re.split('_|-',dEs_df.index[i-1])[3]))
dGF.append(dEs_df.iloc[i,0])
dGR.append(dEs_df.iloc[i-1,0])
Lambdas_R.append((re.split('_|-',dEs_df.index[-1])[1])+'_'+(re.split('_|-',dEs_df.index[-1])[1]))
Lambdas.append(re.split('_|-',dEs_df.index[-1])[1])
dGF.append(0.0)
dGF=dGF[::-1]
for i in range(len(dGF)):
dGF_sum.append(sum(dGF[:i+1]))
dGR_sum.append(sum(dGR[:i+1]))
dG_average_raw=((pd.DataFrame(dGF[1:]))- | pd.DataFrame(dGR[1:][::-1]) | pandas.DataFrame |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def oracle_url() -> str:
conn = os.environ["ORACLE_URL"]
return conn
@pytest.mark.xfail
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_on_non_select(oracle_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
read_sql(oracle_url, query)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_complex_join(oracle_url: str) -> None:
query = "SELECT a.test_int, b.test_date, c.test_num_int FROM test_table a left join test_types b on a.test_int = b.test_num_int cross join (select test_num_int from test_types) c where c.test_num_int < 3"
df = read_sql(oracle_url, query)
df = df.sort_values("TEST_INT").reset_index(drop=True)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 5, 2333], dtype="Int64"),
"TEST_DATE": pd.Series(
["2019-05-21", None, None, "2020-05-21", "2020-05-21", None],
dtype="datetime64[ns]",
),
"TEST_NUM_INT": pd.Series([1, 1, 1, 1, 1, 1], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_aggregation(oracle_url: str) -> None:
query = "select avg(test_int), test_char from test_table group by test_char"
df = read_sql(oracle_url, query)
df = df.sort_values("AVG(TEST_INT)").reset_index(drop=True)
expected = pd.DataFrame(
data={
"AVG(TEST_INT)": pd.Series([1, 2, 5, 1168.5], dtype="float64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 ", "str05", None], dtype="object"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_partition_on_aggregation(oracle_url: str) -> None:
query = "select sum(test_int) cid, test_char from test_table group by test_char"
df = read_sql(oracle_url, query, partition_on="cid", partition_num=3)
df = df.sort_values("CID").reset_index(drop=True)
expected = pd.DataFrame(
index=range(4),
data={
"CID": pd.Series([1, 2, 5, 2337], dtype="float64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 ", "str05", None], dtype="object"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_aggregation2(oracle_url: str) -> None:
query = "select DISTINCT(test_char) from test_table"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_CHAR": pd.Series(["str05", "str1 ", "str2 ", None], dtype="object"),
},
)
df.sort_values(by="TEST_CHAR", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_partition_on_aggregation2(oracle_url: str) -> None:
query = "select MAX(test_int) MAX, MIN(test_int) MIN from test_table"
df = read_sql(oracle_url, query, partition_on="MAX", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"MAX": pd.Series([2333], dtype="float64"),
"MIN": pd.Series([1], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_manual_partition(oracle_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(oracle_url, query=queries)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, "str05", None], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, -4.44, None, None], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_without_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 2333, 4, 5], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, None, "str05"], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, None, -4.44, None], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_limit_without_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table where rownum <= 3"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 ", None], dtype="object"),
"TEST_FLOAT": pd.Series([1.1, 2.2, None], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_limit_large_without_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table where rownum < 10"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 2333, 4, 5], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, None, "str05"], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, None, -4.44, None], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
oracle_url,
query,
partition_on="test_int",
partition_range=(0, 5001),
partition_num=3,
)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, "str05", None], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, -4.44, None, None], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition_without_partition_range(oracle_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 1"
df = read_sql(
oracle_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2], dtype="Int64"),
"TEST_CHAR": pd.Series(["str1 ", "str2 "], dtype="object"),
"TEST_FLOAT": pd.Series([1.1, 2.2], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition_and_selection(oracle_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
oracle_url,
query,
partition_on="test_int",
partition_range=(1, 2333),
partition_num=3,
)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([1, 2, 4, 5, 2333], dtype="Int64"),
"TEST_CHAR": pd.Series(
["str1 ", "str2 ", None, "str05", None], dtype="object"
),
"TEST_FLOAT": pd.Series([1.1, 2.2, -4.44, None, None], dtype="float64"),
},
)
df.sort_values(by="TEST_INT", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_with_partition_and_spja(oracle_url: str) -> None:
query = "select test_table.test_int cid, SUM(test_types.test_num_float) sfloat from test_table, test_types where test_table.test_int=test_types.test_num_int group by test_table.test_int"
df = read_sql(oracle_url, query, partition_on="cid", partition_num=2)
expected = pd.DataFrame(
data={
"CID": pd.Series([1, 5], dtype="Int64"),
"SFLOAT": pd.Series([2.3, -0.2], dtype="float64"),
},
)
df.sort_values(by="CID", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_types(oracle_url: str) -> None:
query = "SELECT * FROM test_types"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_NUM_INT": pd.Series([1, 5, 5, None], dtype="Int64"),
"TEST_INT": pd.Series([-10, 22, 22, 100], dtype="Int64"),
"TEST_NUM_FLOAT": pd.Series([2.3, -0.1, -0.1, None], dtype="float64"),
"TEST_FLOAT": pd.Series([2.34, 123.455, 123.455, None], dtype="float64"),
"TEST_BINARY_FLOAT": pd.Series(
[-3.456, 3.1415926535, 3.1415926535, None], dtype="float64"
),
"TEST_BINARY_DOUBLE": pd.Series(
[9999.99991, -111111.2345, -111111.2345, None], dtype="float64"
),
"TEST_CHAR": pd.Series(["char1", "char2", "char2", None], dtype="object"),
"TEST_VARCHAR": pd.Series(
["varchar1", "varchar222", "varchar222", None], dtype="object"
),
"TEST_NCHAR": pd.Series(
["y123 ", "aab123", "aab123", None], dtype="object"
),
"TEST_NVARCHAR": pd.Series(
["aK>?KJ@#$%", ")>KDS)(F*&%J", ")>KDS)(F*&%J", None], dtype="object"
),
"TEST_DATE": pd.Series(
["2019-05-21", "2020-05-21", "2020-05-21", None], dtype="datetime64[ns]"
),
"TEST_TIMESTAMP": pd.Series(
[
"2019-05-21 01:02:33",
"2020-05-21 01:02:33",
"2020-05-21 01:02:33",
None,
],
dtype="datetime64[ns]",
),
"TEST_TIMESTAMPTZ": pd.Series(
[
"1999-12-01 11:00:00",
"1899-12-01 11:00:00",
"1899-12-01 11:00:00",
None,
],
dtype="datetime64[ns]",
),
"TEST_CLOB": pd.Series(
["13ab", "13ab", "13ab", None], dtype="object"
),
"TEST_BLOB": pd.Series(
[ b'9\xaf', b'9\xaf', b'9\xaf', None], dtype="object"
),
}
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_empty_result(oracle_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(oracle_url, query)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([], dtype="Int64"),
"TEST_CHAR": pd.Series([], dtype="object"),
"TEST_FLOAT": pd.Series([], dtype="float64"),
}
)
assert_frame_equal(df, expected, check_names=True)
@pytest.mark.skipif(
not os.environ.get("ORACLE_URL"), reason="Test oracle only when `ORACLE_URL` is set"
)
def test_oracle_empty_result_on_partition(oracle_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(oracle_url, query, partition_on="test_int", partition_num=3)
print(df)
expected = pd.DataFrame(
data={
"TEST_INT": pd.Series([], dtype="Int64"),
"TEST_CHAR": pd.Series([], dtype="object"),
"TEST_FLOAT": | pd.Series([], dtype="float64") | pandas.Series |
"""This module was designed specifically to be used by a covid-19 dashboard application and
contains functions to request covid data from uk_covid19 api, process it and send them to the UI.
"""
import logging
import typing
import pandas as pd
from uk_covid19 import Cov19API, exceptions
from requests.exceptions import ConnectionError as requestsConnectionError
import dashboard.scheduler_updater as us
import dashboard.covid_news_handling as cnh
import dashboard.config_handler as ch
display_covid = {}
def parse_csv_data(csv_filename: str) -> list:
"""Takes in a csv filename and returns a list with each item being a new line of the file.
    :param csv_filename: The name of the csv file; the '.csv' extension is optional
:type csv_filename: str
:return: A list of strings with each item being a single line of the csv file
:rtype: list
"""
logging.info("starting parse_csv_data function")
csv_filename = str(csv_filename) # Makes sure input is a string
if csv_filename[-4:] != ".csv": # Checks if the filename has the
csv_filename += ".csv" # '.csv' appendix and appends it if necessary
with open(csv_filename, "r", encoding='utf-8') as data:
logging.info("parse_csv_data function finished")
return [word.split()[0] for word in data.read().splitlines()]
# returns data as a list of strings for each row in the csv file
def process_covid_csv_data(covid_csv_data: list) -> typing.Optional[tuple]:
"""Takes covid data and returns last 7 days of cases, cumulative hospital cases and deaths.
Takes covid data as a list of strings as returned by parse_csv_data function, where each
word in a string represents a data value with the first row being the headers of each column.
This function converts this data into a pandas dataframe to organise the data and then
returns a tuple containing the sum of the last 7 days of cases, the current cumulative
amount of hospital cases and the cumulative deaths.
:parameter covid_csv_data: covid data as returned by parse_csv_data function
:type covid_csv_data: list
:return: The sum of the last 7 days of covid cases, the cumulative amount of hospital cases
and the cumulative amount of deaths or None
:rtype: tuple or None
"""
logging.info('starting process_covid_csv_data function')
covid_csv_data = [word.split(',') for word in covid_csv_data]
# Converts list of strings into list of lists as required by pandas dataframe
covid_dataframe = | pd.DataFrame(covid_csv_data[1:], columns=covid_csv_data[0]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 09:34:37 2018
@author: SilverDoe
"""
'''
========================== DataFrame ==========================================
pandas.DataFrame( data, index, columns, dtype, copy)
Parameters :
============
1. data : data takes various forms like ndarray, series, map, lists, dict, constants
and also another DataFrame.
2. index : For the row labels, the Index to be used for the resulting frame.
   Optional; defaults to np.arange(n) if no index is passed.
3. columns : For column labels, the optional default is np.arange(n).
   This is only true if no index is passed.
4. dtype : Data type of each column.
5. copy : Copies data from the inputs if True; the
   default is False.
'''
#=============== Empty DataFrame =================================================
import pandas as pd
df = pd.DataFrame()
print(df)
#============= DataFrame from Lists ============================================
# no index passed, no column names given
import pandas as pd
data = [1,2,3,4,5]
df = pd.DataFrame(data)
print(df)
# no index passed, column names given
import pandas as pd
data = [['Natsu',13],['Lisanna',9],['Happy',1]]
df = pd.DataFrame(data,columns=['Name','Age'])
print(df)
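# index and column names passed (added example, not in the original notes):
# row labels come from the index argument instead of the default np.arange(n)
import pandas as pd
data = [['Natsu',13],['Lisanna',9],['Happy',1]]
df = pd.DataFrame(data,index=['rank1','rank2','rank3'],columns=['Name','Age'])
print(df)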
# no index passed, column names given, datatype passed
import pandas as pd
data = [['Natsu',13],['Lisanna',8],['Happy',1]]
df = | pd.DataFrame(data,columns=['Name','Age'],dtype=float) | pandas.DataFrame |
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from torch import tensor, float32
import json
from collections import defaultdict
# PyTorch representation of the cleaned dataset
class DatasetModel(Dataset):
def __init__(self, df, vectorizer):
self.df = df
self._vectorizer = vectorizer
self._max_seq_length = max(map(len, self.df.predictor)) + 2
self.train_df = self.df[self.df.split == 'train']
self.train_size = len(self.train_df)
self.valid_df = self.df[self.df.split == 'valid']
self.valid_size = len(self.valid_df)
self.test_df = self.df[self.df.split == 'test']
self.test_size = len(self.test_df)
self._lookup_dict = {'train': (self.train_df, self.train_size),
'valid': (self.valid_df, self.valid_size),
'test': (self.test_df, self.test_size)}
self.set_split('train')
        # class weights
class_counts = self.train_df.target.value_counts().to_dict()
def sort_key(item):
return self._vectorizer.target_vocab.lookup_token(item[0])
sorted_counts = sorted(class_counts.items(), key=sort_key)
frequences = [count for _, count in sorted_counts]
self.class_weights = 1.0 / tensor(frequences, dtype=float32)
    # loads the data and creates the vectorizer
@classmethod
def make_vectorizer(cls, path: str):
df = pd.read_csv(path)
train_df = df[df.split == 'train']
return cls(df, PredictorVectorizer.from_dataframe(train_df))
def get_vectorizer(self):
return self._vectorizer()
def save_vectorizer(self, vectorizer_filepath):
with open(vectorizer_filepath, "w") as fp:
json.dump(self._vectorizer.to_serializable(), fp)
def set_split(self, split='train'):
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def __len__(self):
return self._target_size
    # entry point for the data in PyTorch
    def __getitem__(self, index):
        "index - the index of the data point"
row = self._target_df.iloc[index]
predictor_vector, vec_length = self._vectorizer.vectorize(row.predictor, self._max_seq_length)
target_index = self._vectorizer.target_vocab.lookup_token(row.target)
return {'x_data': predictor_vector,
'y_target': target_index,
'x_length': vec_length}
def get_num_batches(self, batch_size):
return len(self) // batch_size
# vectorizer that maps the vocabularies to each other and applies them
class PredictorVectorizer:
def __init__(self, char_vocab, target_vocab):
"""
        Args:
            char_vocab (Vocabulary) - vocabulary for the input sequences
            target_vocab - vocabulary for the target (category)
"""
self.char_vocab = char_vocab
self.target_vocab = target_vocab
def vectorize(self, predictor, vector_length=-1):
"""
        Args:
            predictor - the predictor sequence to vectorize
            vector_length - the length of the index vector
"""
indices = [self.char_vocab.begin_seq_index]
indices.extend(self.char_vocab.lookup_token(token)
for token in predictor)
indices.append(self.char_vocab.end_seq_index)
if vector_length < 0:
vector_length = len(indices)
out_vector = np.zeros(vector_length, dtype=np.int64)
out_vector[:len(indices)] = indices
out_vector[len(indices):] = self.char_vocab.mask_index
return out_vector, len(indices)
@classmethod
def from_dataframe(cls, df: pd.DataFrame):
char_vocab = SequenceVocabulary()
target_vocab = Vocabulary()
for index, row in df.iterrows():
tokens = row.predictor.split(' ')
for token in tokens:
char_vocab.add_token(token)
target_vocab.add_token(row.target)
return cls(char_vocab, target_vocab)
@classmethod
def from_serializable(cls, contents):
char_vocab = SequenceVocabulary.from_serializable(contents['char_vocab'])
target_vocab = Vocabulary.from_serializable(contents['target_vocab'])
return cls(char_vocab=char_vocab, target_vocab=target_vocab)
def to_serializable(self):
return {'char_vocab': self.char_vocab.to_serializable(),
'target_vocab': self.target_vocab.to_serializable()}
# maps tokens to numeric form - utility vocabularies
class Vocabulary:
"""
    Args:
        token_to_idx: dict - mapping of tokens to indices
        add_unk: bool - whether to add the UNK token
        unk_token - the UNK token added to the vocabulary
"""
def __init__(self, token_to_idx=None, add_unk=True, unk_token='<UNK>'):
if token_to_idx is None:
token_to_idx = dict()
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token for token, idx in self._token_to_idx.items()}
self._add_unk = add_unk
self._unk_token = unk_token
self.unk_index = -1
if add_unk:
self.unk_index = self.add_token(unk_token)
    # returns a serializable dictionary
def to_serializable(self):
return {'token_to_idx': self._token_to_idx,
'add_unk': self._add_unk,
'unk_token': self._unk_token}
    # builds a class instance from a serialized dictionary
@classmethod
def from_serializable(cls, contents):
return cls(**contents)
    # updates the mapping dictionaries - if the token is not found, it is added to the vocabulary
def add_token(self, token):
if token in self._token_to_idx:
index = self._token_to_idx[token]
else:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
    # retrieves the index for the token, or the UNK index if the token is not found
def lookup_token(self, token):
if self._add_unk:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
    # returns the token corresponding to the index
def lookup_index(self, index):
if index not in self._idx_to_token:
            raise KeyError('Index (%d) is not in the vocabulary' % index)
return self._idx_to_token[index]
def __str__(self):
        return '<Vocabulary (size=%d)>' % len(self)
def __len__(self):
return len(self._token_to_idx)
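# Hedged usage sketch (added, not part of the original module):
def _example_vocabulary_roundtrip():
    """Token <-> index round trip, with UNK fallback for unseen tokens."""
    vocab = Vocabulary()
    idx = vocab.add_token('hello')
    assert vocab.lookup_index(idx) == 'hello'
    return vocab.lookup_token('missing')  # equals vocab.unk_index because add_unk=True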
# tokenization of sequences
class SequenceVocabulary(Vocabulary):
def __init__(self, token_to_idx=None, unk_token='<UNK>',
mask_token="<MASK>", begin_seq_token='<BEGIN>',
end_seq_token='<END>'):
super(SequenceVocabulary, self).__init__(token_to_idx)
        self._mask_token = mask_token # for handling variable-length sequences
        self._unk_token = unk_token # marks tokens that are missing from the vocabulary
        self._begin_seq_token = begin_seq_token # start-of-sequence token
        self._end_seq_token = end_seq_token # end-of-sequence token
self.mask_index = self.add_token(self._mask_token)
self.unk_index = self.add_token(self._unk_token)
self.begin_seq_index = self.add_token(self._begin_seq_token)
self.end_seq_index = self.add_token(self._end_seq_token)
def to_serializable(self):
contents = super(SequenceVocabulary, self).to_serializable()
contents.update({'unk_token': self._unk_token,
'mask_token': self._mask_token,
'begin_seq_token': self._begin_seq_token,
'end_seq_token': self._end_seq_token})
return contents
def lookup_token(self, token):
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
class TrainValidSplit:
def __init__(self,
train_proportion: float,
valid_proportion: float,
test_proportion: float,
raw_df_path: str,
seed: int):
self.by_target = self.get_target_dict(raw_df_path)
self.final_list = self.make_split(self.by_target, train_proportion, valid_proportion,
test_proportion, seed)
@staticmethod
def get_target_dict(raw_df_path):
df = | pd.read_csv(raw_df_path) | pandas.read_csv |
import os
import copy
import datetime
import warnings
from matplotlib import pyplot as plt
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import math
from datetime import datetime
import random
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
import os
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'
import numpy as np
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, Flatten, Embedding, Dropout, PReLU,ReLU
from keras.layers import Bidirectional, SpatialDropout1D, CuDNNGRU,CuDNNLSTM, Conv1D,Conv2D,MaxPool2D,Reshape
from keras.layers import GlobalAvgPool1D, GlobalMaxPool1D, concatenate,GlobalMaxPooling1D,GlobalAveragePooling1D
from keras.regularizers import l2,l1
from keras.layers.normalization import BatchNormalization
from keras.engine import Layer
from keras.layers.core import Flatten
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau,EarlyStopping
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation,BatchNormalization
from keras.regularizers import l1,l2
from keras.preprocessing.text import Tokenizer
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau,EarlyStopping,Callback
import gc
from tqdm import tqdm_notebook
class _Data_Preprocess:
def __init__(self):
self.int8_max = np.iinfo(np.int8).max
self.int8_min = np.iinfo(np.int8).min
self.int16_max = np.iinfo(np.int16).max
self.int16_min = np.iinfo(np.int16).min
self.int32_max = np.iinfo(np.int32).max
self.int32_min = np.iinfo(np.int32).min
self.int64_max = np.iinfo(np.int64).max
self.int64_min = np.iinfo(np.int64).min
self.float16_max = np.finfo(np.float16).max
self.float16_min = np.finfo(np.float16).min
self.float32_max = np.finfo(np.float32).max
self.float32_min = np.finfo(np.float32).min
self.float64_max = np.finfo(np.float64).max
self.float64_min = np.finfo(np.float64).min
'''
function: _get_type(self,min_val, max_val, types)
    get the smallest dtype that the column values can be safely downcast to
'''
def _get_type(self, min_val, max_val, types):
if types == 'int':
if max_val <= self.int8_max and min_val >= self.int8_min:
return np.int8
            elif max_val <= self.int16_max and min_val >= self.int16_min:
return np.int16
elif max_val <= self.int32_max and min_val >= self.int32_min:
return np.int32
return None
elif types == 'float':
if max_val <= self.float16_max and min_val >= self.float16_min:
return np.float16
if max_val <= self.float32_max and min_val >= self.float32_min:
return np.float32
if max_val <= self.float64_max and min_val >= self.float64_min:
return np.float64
return None
'''
function: _memory_process(self,df)
    downcast column data types to save memory
'''
def _memory_process(self, df):
init_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('Original data occupies {} GB memory.'.format(init_memory))
df_cols = df.columns
for col in tqdm_notebook(df_cols):
try:
if 'float' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'float')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
elif 'int' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'int')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
except:
                print(' Cannot process column {}.'.format(col))
afterprocess_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('After processing, the data occupies {} GB memory.'.format(afterprocess_memory))
return df
memory_preprocess = _Data_Preprocess()
user_app_actived = pd.read_csv('../../data/original_data/user_app_actived.csv',names=['uId', 'appId'])
x_train = pd.read_csv('../../data/original_data/age_train.csv',names=['uId','age_group'],dtype={'uId':np.int32, 'age_group':np.int8})
x_test = pd.read_csv('../../data/original_data/age_test.csv',names=['uId'],dtype={'uId':np.int32})
usage_list = pd.read_csv('../../data/processed_data/usage_app_info.csv')
usage_appId = pd.read_csv('../../data/processed_data/usage_appId.csv')
train = pd.read_csv('../../data/features/base_train.csv')
test = pd.read_csv('../../data/features/base_test.csv')
train=memory_preprocess._memory_process(train)
test=memory_preprocess._memory_process(test)
print(test.info())
gc.collect()
actived_features_all = pd.read_csv('../../data/features/actived_features_all.csv')
actived_features_all=memory_preprocess._memory_process(actived_features_all)
train = pd.merge(train, actived_features_all, how='left', on='uId').fillna(0)
test = pd.merge(test, actived_features_all, how='left', on='uId').fillna(0)
del actived_features_all
gc.collect()
act_use_rnn_hide_train=pd.read_csv('../../data/features/act_use_rnn_hide_train.csv')
act_use_rnn_hide_train=memory_preprocess._memory_process(act_use_rnn_hide_train)
act_use_rnn_hide_train.rename(columns={'Unnamed: 0': 'uId'}, inplace=True)
train = pd.merge(train, act_use_rnn_hide_train, how='left', on='uId').fillna(0)
del act_use_rnn_hide_train
act_use_rnn_hide_test=pd.read_csv('../../data/features/act_use_rnn_hide_test.csv')
act_use_rnn_hide_test=memory_preprocess._memory_process(act_use_rnn_hide_test)
act_use_rnn_hide_test.rename(columns={'Unnamed: 0': 'uId'}, inplace=True)
test = pd.merge(test, act_use_rnn_hide_test, how='left', on='uId').fillna(0)
print(test.info())
del act_use_rnn_hide_test
gc.collect()
user_app_actived['app_list'] = user_app_actived.appId.str.split('#')
import ast
from tqdm import tqdm
usage_train = []
for idx in tqdm(usage_list.appId):
usage_train.append(ast.literal_eval(idx))
usage_list['app_list'] = usage_train
user_app_actived.drop('appId',axis=1,inplace=True)
usage_list.drop('appId',axis=1,inplace=True)
user_app_actived = pd.merge(user_app_actived, usage_list, how='left', on='uId')
result = []
for index,row in tqdm(user_app_actived.iterrows()):
try:
result.append(row['app_list_x'] + row['app_list_y'])
except:
result.append(row['app_list_x'])
user_app_actived['app_list'] = result
user_app_actived.drop(['app_list_x','app_list_y'],axis=1,inplace =True)
x_train = pd.merge(x_train, user_app_actived, how='left', on='uId')
x_test = pd.merge(x_test, user_app_actived, how='left', on='uId')
y_train = x_train.age_group - 1
x_train = x_train.drop('age_group',axis=1)
del user_app_actived
del usage_list
del usage_train
gc.collect()
train_uId = x_train.uId.tolist()
test_uId = x_test.uId.tolist()
test.index = test.uId.tolist()
train.index = train.uId.tolist()
test = test.loc[test_uId,:]
train = train.loc[train_uId,:]
appId = pd.read_csv('../../data/processed_data/appId.csv')
usage_appId = pd.read_csv('../../data/processed_data/usage_appId_top_num100000.csv')
usage_appId = usage_appId[-10000:]
usage_appId['id'] = np.arange(0,10000)
all_appid = list(set(appId.appId.tolist() + usage_appId.appId.tolist()))
app_dict = dict(zip(all_appid,np.arange(len(all_appid))))
x_train = [[x for x in apps if x in app_dict] for apps in x_train.app_list]
x_test = [[x for x in apps if x in app_dict] for apps in x_test.app_list]
x_train = [" ".join(app) for app in x_train]
x_test = [" ".join(app) for app in x_test]
c_vec1 = CountVectorizer(lowercase=False,ngram_range=(1,1),dtype=np.int8)
c_vec1.fit(x_train + x_test)
x_train = c_vec1.transform(x_train).toarray()
x_test = c_vec1.transform(x_test).toarray()
gc.collect()
train.drop(['uId','age_group'],axis=1,inplace=True)
test.drop('uId',axis=1,inplace=True)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
from sklearn.preprocessing import StandardScaler,MinMaxScaler
train = train.replace([np.inf, -np.inf], np.nan).fillna(0)
test = test.replace([np.inf, -np.inf], np.nan).fillna(0)
scaler = MinMaxScaler()
scaler.fit(pd.concat([train,test],axis=0))
train = scaler.transform(train)
test = scaler.transform(test)
train = memory_preprocess._memory_process(pd.DataFrame(train))
test = memory_preprocess._memory_process(pd.DataFrame(test))
gc.collect()
x_train = np.hstack((x_train,train.values))
x_test = np.hstack((x_test,test.values))
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train, num_classes=None)
def mlp_v3():
model = Sequential()
model.add(Dense(1024, input_shape=(13,400,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.2))
# model.add(BatchNormalization())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
# model.add(BatchNormalization())
#
model.add(Dense(6))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='Nadam',
metrics=['accuracy'])
return model
from sklearn.model_selection import train_test_split, StratifiedKFold
kfold = StratifiedKFold(n_splits=5, random_state=10, shuffle=False)
y_test = np.zeros((x_test.shape[0],6))
y_val = np.zeros((x_train.shape[0],6))
for i, (train_index, valid_index) in enumerate(kfold.split(x_train, np.argmax(y_train,axis=1))):
X_train, X_val, Y_train, Y_val = x_train[train_index],x_train[valid_index], y_train[train_index], y_train[valid_index]
filepath="weights_best2.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=2, save_best_only=True, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.6, patience=1, min_lr=0.0001, verbose=2)
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2, mode='auto')
callbacks = [checkpoint, reduce_lr]
model = mlp_v3()
if i == 0:print(model.summary())
model.fit(X_train, Y_train, batch_size=128, epochs=5, validation_data=(X_val, Y_val), verbose=1, callbacks=callbacks,
)
model.load_weights(filepath)
y_val[valid_index] = model.predict(X_val, batch_size=128, verbose=1)
y_test += np.array(model.predict(x_test, batch_size=128, verbose=1))/5
y_val = | pd.DataFrame(y_val,index=train_uId) | pandas.DataFrame |
# Import Module
import PyPDF2
from PyPDF2.utils import PdfReadError
import pdfx
from urlextract import URLExtract
import requests
import fitz
import click
import argparse
import os
from urllib.parse import urlparse, ParseResult
from fpdf import FPDF
import gspread
import pandas as pd
from gspread_dataframe import get_as_dataframe, set_with_dataframe
#import pdb;pdb.set_trace()
# Parse args
parser = argparse.ArgumentParser(description='Description of your program')
parser.add_argument('-p','--path', help='Location of the files', default= "./CitationSaver/")
parser.add_argument('-d','--destination', help='Destination of the URLs extract', default= "./URLs/")
parser.add_argument('-a','--afterprocessed', help='Destination of the files processed', default= "./Processed/")
parser.add_argument('-w','--pathwarc', help='Destination of the WARCs for each file', default= "./WARCs/")
parser.add_argument('-j','--pathjson', help='Destination of the json file with google service key', default= "JSON")
parser.add_argument('-k','--key', help='Key Google Spreadsheet', default= "KEY")
parser.add_argument('-ws','--worksheet', help='Worksheet Google Spreadsheet', default= "WORKSHEET")
args = vars(parser.parse_args())
#Connect gspread
gc = gspread.service_account(filename=args['pathjson'])
sh = gc.open_by_key(args['key'])
worksheet = sh.worksheet(args['worksheet'])
#Transform worksheet to pandas dataframe
df = get_as_dataframe(worksheet)
#Global variable with the URLs check for each document
list_urls_check = []
# Extract URLs from text
def extract_url(text, list_urls):
extractor = URLExtract()
urls = extractor.find_urls(text)
for url in urls:
url = url.replace(",", "")
if "http" in url:
url = url[url.find('http'):]
if url not in list_urls:
list_urls.append(url)
# Check if the URLs is available
def check_url(scheme, netloc, path, url_parse, output):
url_parse = ParseResult(scheme, netloc, path, *url_parse[3:])
response = requests.head(url_parse.geturl())
if str(response.status_code).startswith("2") or str(response.status_code).startswith("3"):
output.write(url_parse.geturl()+"\n")
list_urls_check.append(url_parse.geturl())
else:
url_parse = ParseResult("https", netloc, path, *url_parse[3:])
response = requests.head(url_parse.geturl())
if str(response.status_code).startswith("2") or str(response.status_code).startswith("3"):
output.write(url_parse.geturl()+"\n")
list_urls_check.append(url_parse.geturl())
def check_pdf(file_name, file):
try:
pdf = PyPDF2.PdfFileReader(file_name)
return True
except PdfReadError:
return False
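# Hedged usage sketch (added): collect unique http(s) links from free text.
def _example_extract_url():
    found = []
    extract_url("see https://example.com/paper and more at http://arquivo.pt", found)
    return found  # e.g. ['https://example.com/paper', 'http://arquivo.pt']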
def extract_urls_pdf(file, file_name, list_urls):
#First method: PyPDF2
# Open File file
pdfFileObject = open(file_name, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObject)
# Iterate through all pages
for page_number in range(pdfReader.numPages):
pageObject = pdfReader.getPage(page_number)
# Extract text from page
pdf_text = pageObject.extractText()
extract_url(pdf_text, list_urls)
if not list_urls:
#Update GoogleSheet
update_google_sheet(file, "", "", "", "Problem using PyPDF2 process", True)
# CLose the PDF
pdfFileObject.close()
#Second method: PDFx
# Read PDF File
pdf = pdfx.PDFx(file_name)
# Get list of URL
json = pdf.get_references_as_dict()
if len(json) != 0:
for elem in json['url']:
if elem not in list_urls:
list_urls.append(elem)
else:
#Update GoogleSheet
update_google_sheet(file, "", "", "", "Problem using PDFx process", True)
#Third method: fitz
# Load PDF
with fitz.open(file_name) as doc:
text = ""
for page in doc:
text += page.getText().strip()#.replace("\n", "")
text = ' '.join(text.split())
extract_url(text, list_urls)
def check_urls(list_urls, output_file):
urls_to_google_sheet = []
if list_urls != []:
# Process the URLs
with open(output_file, 'w') as output:
# Remove mailto links
links = [url for url in list_urls if "mailto:" not in url]
for elem in links:
#Remove trash at the end of the URLs
if elem.endswith(";") or elem.endswith(".") or elem.endswith(")") or elem.endswith("/"):
elem = elem[:-1]
url_parse = urlparse(elem, 'http')
#URL parse
scheme = url_parse.scheme
netloc = url_parse.netloc or url_parse.path
path = url_parse.path if url_parse.netloc else ''
if not netloc.startswith('www.'):
netloc = 'www.' + netloc
try:
#Check if URL
check_url(scheme, netloc, path, url_parse, output)
except:
continue
#else:
#do something
def update_google_sheet(file, path_output, list_urls, list_urls_check, note, error):
#Get the index from the file being processed in the google sheet
index = df.index[df['File Name CitationSaver System']==file].tolist()
if not error:
#Check if columns are empty for the present row
if pd.isnull(df.at[index[0], 'Results URLs File Path']) and pd.isnull(df.at[index[0], 'Results URLs without check']) and pd.isnull(df.at[index[0], 'Results URLs with check']):
#Update value Google Sheet
df.at[index[0], 'Results URLs File Path'] = path_output
df.at[index[0], 'Results URLs without check'] = list_urls
df.at[index[0], 'Results URLs with check'] = list_urls_check
if note != "":
if not | pd.isnull(df.at[index[0], 'Note/Error']) | pandas.isnull |
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import pandas as pd
from matplotlib.pyplot import imread
from models import art_rem1
from torch.utils.data import DataLoader
from datas import dataset_loader, dataLoader_whul2p_unet_oam, dataLoader_uhul2p_unet_oam, dataLoader_whuhul2p_unet_oam, dataLoader_uhwh2p_unet_oam
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scd
import sys
sys.path.append("..")
from utils.logutils import LogUtils
import utils.check_points_utils as checkpoint_util
from torch.autograd import Variable
from torchvision import transforms
from datas import normalizeData
from burstLoss import BurstLoss as BL
# from dataloader import load_data as data_loader
#Pass the arguments
parser = argparse.ArgumentParser(description="art_rem1")
parser.add_argument("--batchSize", type=int, default=4, help="Training batch size")
parser.add_argument("--num_epochs", type=int, default=600, help="Number of training epochs")
parser.add_argument("--decay_step", type=int, default=100, help="The step at which the learning rate should drop")
parser.add_argument("--lr_decay", type=float, default=0.5, help='Rate at which the learning rate should drop')
parser.add_argument("--lr", type=float, default=0.01, help="Initial learning rate")
parser.add_argument("--data_dir", type=str, default=" ", help='path of data')
parser.add_argument("--log_dir", type=str, default=" ", help='path of log files')
parser.add_argument("--write_freq", type=int, default=50, help="Step for saving Checkpoint")
parser.add_argument("--checkpoint", type=str, default=None, help="Checkpoint to start from")
parser.add_argument("--gpu_no", type=str, default="0", help="GPU number")
parser.add_argument("--input_channel", type=int, default=2, help="Input channels")
parser.add_argument("--start_id", type=int, default=1, help="Start data id")
parser.add_argument("--end_id", type=int, default=40, help="End data id")
parser.add_argument("--start_dev_id", type=int, default=0, help="Start data id for dev set")
parser.add_argument("--end_dev_id", type=int, default=0, help="End data id for dev set")
parser.add_argument("--num_of_ang", type=int, default=5, help="Number of angles that one object rotates for")
parser.add_argument("--num_of_motion", type=int, default=5, help="Number of motions that one object moves")
opt = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_no
# load the training data set
# input_set, groundTruth_set, mask = dataset_loader(opt.data_dir)
# input_set = torch.FloatTensor(np.array(input_set))
# groundTruth_set = torch.FloatTensor(np.array(groundTruth_set))
# mask = torch.FloatTensor(np.array(mask))
# mask = mask/255
# norm_input = normalizeData(input_set)
# norm_gt = normalizeData(groundTruth_set)
input_set, groundTruth_set, mask, filenames = dataLoader_uhwh2p_unet_oam(opt.data_dir, opt.start_id, opt.end_id, opt.num_of_ang, opt.num_of_motion)
input_set = torch.FloatTensor(np.array(input_set))
groundTruth_set = torch.FloatTensor(np.array(groundTruth_set))
mask = torch.FloatTensor(np.array(mask))
train_set=[]
for i in range(len(input_set)):
train_set.append([input_set[i], groundTruth_set[i], mask[i], filenames[i]])
num_workers = len(np.fromstring(opt.gpu_no, dtype=int, sep=','))
trainLoader = DataLoader(dataset=train_set, num_workers=num_workers, batch_size=opt.batchSize, shuffle=True, pin_memory=True)
# Define the loss function
mse_loss = nn.MSELoss(reduction='mean')
def squared_diff(mask, output, groundTruth):
sq_diff = torch.square(output - groundTruth)
mask_sq_diff = torch.mul(mask,sq_diff)
loss = torch.mean(mask_sq_diff)
return loss
def ab_diff(mask, output, groundTruth):
abs_diff = torch.abs(output - groundTruth)
mask_abs_diff = torch.mul(mask,abs_diff)
loss = torch.mean(mask_abs_diff)
return loss
def covariance(output, out_mean, groundTruth, gt_mean):
out = output - out_mean
gt = groundTruth - gt_mean
prod = torch.mul(out,gt)
prod_sum = torch.sum(prod)
covar = prod_sum/(((output.shape[2])*(output.shape[3]))-1)
return covar
def ssim_ind(output, groundTruth):
k1 = 0.01
k2 = 0.03
out_mean = torch.mean(output)
gt_mean = torch.mean(groundTruth)
out_var = torch.var(output)
gt_var = torch.var(groundTruth)
covar_var = covariance(output, out_mean, groundTruth, gt_mean)
    # standard SSIM constants, assuming a dynamic range of 255
    c1 = (k1*255)*(k1*255)
    c2 = (k2*255)*(k2*255)
    num = ((2*out_mean*gt_mean)+c1)*((2*covar_var)+c2)
    # the variances are already sigma^2, so they enter the SSIM denominator directly
    den = ((out_mean*out_mean)+(gt_mean*gt_mean)+c1)*(out_var+gt_var+c2)
ssim = num/den
return ssim
def ssim_loss(mask,output,groundTruth):
out = torch.mul(output, mask)
gt = torch.mul(groundTruth,mask)
ssim = ssim_ind(out,gt)
loss = 1 - ssim
return loss
criterion = BL()
def burst_loss(mask, output, groundTruth):
out = torch.mul(output,mask)
gt = torch.mul(groundTruth,mask)
loss = criterion(out,gt)
return loss
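# Illustrative only: a quick sanity check of the hand-written masked losses above on random
# tensors (BurstLoss itself comes from the project's burstLoss module and is not exercised here).
# This helper is never called during training; it is purely a usage sketch.
def _loss_sanity_check(batch=2, height=64, width=64):
    dummy_out = torch.rand(batch, 1, height, width)
    dummy_gt = torch.rand(batch, 1, height, width)
    dummy_mask = torch.ones(batch, 1, height, width)
    return {
        'masked_mse': squared_diff(dummy_mask, dummy_out, dummy_gt).item(),
        'masked_l1': ab_diff(dummy_mask, dummy_out, dummy_gt).item(),
        'ssim_loss': ssim_loss(dummy_mask, dummy_out, dummy_gt).item(),
    }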
iters = -1
#Define the log directory for checkpoints
if not os.path.exists(opt.log_dir):
os.makedirs(opt.log_dir)
checkpoints_dir = os.path.join(opt.log_dir, "checkpoints")
if not os.path.exists(checkpoints_dir):
os.mkdir(checkpoints_dir)
# Load the model
model = art_rem1(opt.input_channel).cuda()
model = nn.DataParallel(model) # For using multiple GPUs
# Define the optimizer
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
#Load status from checkpoint
log_open_mode = 'w'
start_epoch = 0
if opt.checkpoint is not None:
fname = os.path.join(checkpoints_dir, opt.checkpoint)
start_epoch, iters = checkpoint_util.load_checkpoint(model_3d=model, optimizer=optimizer, filename=fname)
start_epoch += 1
log_open_mode = 'a'
log = LogUtils(os.path.join(opt.log_dir, 'logfile'), log_open_mode)
log.write('Supervised learning for motion artifact reduction - Training\n')
log.write_args(opt)
lr_scheduler = lr_scd.StepLR(optimizer, step_size=opt.decay_step, gamma=opt.lr_decay)
iters = max(iters,0)
reg = 1e-7
# Train the network on the training dataset
for epoch_num in range(start_epoch, opt.num_epochs):
    ave_loss = 0
    count = 0
    # iterate the loader directly; keeping a second iter() over the same loader would load
    # every batch twice per epoch while the batches from one of the iterators go unused
    for inp_PM, gt_PM, mask_PM, filename_PM in trainLoader:
optimizer.zero_grad()
if lr_scheduler is not None:
lr_scheduler.step(iters)
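            # note: the scheduler is stepped once per iteration with the global iteration
            # count, so --decay_step is effectively measured in iterations, not epochs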
        # inp_PM = torch.unsqueeze(inp_PM,1).cuda()
        inp_PM = inp_PM.cuda()
gt_PM = torch.unsqueeze(gt_PM,1).cuda()
mask_PM = torch.unsqueeze(mask_PM,1).cuda()
output_PM = model(inp_PM)
# loss = ab_diff(mask_PM, output_PM, gt_PM)
loss = burst_loss(mask_PM, output_PM, gt_PM)
loss.backward()
optimizer.step()
iters += 1
        ave_loss += loss.item()  # use .item() so the running sum does not keep the autograd graph alive
count += 1
lr_scheduler.get_last_lr()
ave_loss /= count
for param_group in optimizer.param_groups:
print('\nTraining at Epoch %d with a learning rate of %f.' %(epoch_num, param_group["lr"]))
    if opt.write_freq != -1 and (epoch_num + 1) % opt.write_freq == 0:
fname = os.path.join(checkpoints_dir, 'checkpoint_{}'.format(epoch_num))
checkpoint_util.save_checkpoint(filename=fname, model_3d=model, optimizer=optimizer, iters=iters, epoch=epoch_num)
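        # A checkpoint written here can later be passed back via --checkpoint (for example
        # --checkpoint checkpoint_199 together with the same --log_dir) to resume training.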
# Write CSV files
out = output_PM[0][0].detach().cpu().numpy()
    filename = opt.log_dir + "/epoch_" + str(epoch_num) + "_" + str(filename_PM[0]) + "_outputPM.csv"
    # assumed completion: persist the first prediction of the last batch as CSV (header/index layout is an assumption)
    pd.DataFrame(out).to_csv(filename, header=False, index=False)
import pandas as pd
# import camping_server2.config as config
from datetime import datetime
class Sigungucode:
def __init__(self):
# self.path = config.Config.PATH
self.do_list = {'충북': '충청북도', '충남': '충청남도',
'경북': '경상북도', '경남': '경상남도',
'전북': '전라북도', '전남': '전라남도',
'강원': '강원도', '경기': '경기도',
'인천': '인천광역시', '인천시': '인천광역시',
'부산': '부산광역시', '울산': '울산광역시', '대전': '대전광역시',
'대구': '대구광역시', '광주': '광주광역시',
'서울': '서울특별시', '서울시': '서울특별시',
'제주': '제주특별자치도', '제주도': '제주특별자치도'}
self.five_code = | pd.read_csv('/Users/sol/Desktop/dss/Crawling/datas/sigungucode.csv') | pandas.read_csv |
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Series,
isna,
)
import pandas._testing as tm
class TestDataFrameCov:
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame["A"][:5] = np.nan
frame["B"][5:10] = np.nan
result = frame.cov(min_periods=len(frame) - 8)
expected = frame.cov()
expected.loc["A", "B"] = np.nan
expected.loc["B", "A"] = np.nan
tm.assert_frame_equal(result, expected)
# regular
result = frame.cov()
expected = frame["A"].cov(frame["C"])
tm.assert_almost_equal(result["A"]["C"], expected)
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(
np.cov(df.values.T).reshape((1, 1)), index=df.columns, columns=df.columns
)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(
np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns,
columns=df.columns,
)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import os
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DatetimeIndex,
Interval,
IntervalIndex,
NaT,
Series,
TimedeltaIndex,
Timestamp,
cut,
date_range,
isna,
qcut,
timedelta_range,
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.algorithms import quantile
from pandas.tseries.offsets import Day, Nano
def test_qcut():
arr = np.random.randn(1000)
    # The bins are stored as an Index that has been rounded,
    # so comparisons are a bit tricky.
labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, 0.25, 0.5, 0.75, 1.0])
result = labels.categories.left.values
assert np.allclose(result, ex_bins[:-1], atol=1e-2)
result = labels.categories.right.values
assert np.allclose(result, ex_bins[1:], atol=1e-2)
ex_levels = cut(arr, ex_bins, include_lowest=True)
tm.assert_categorical_equal(labels, ex_levels)
def test_qcut_bounds():
arr = np.random.randn(1000)
factor = qcut(arr, 10, labels=False)
assert len(np.unique(factor)) == 10
def test_qcut_specify_quantiles():
arr = np.random.randn(100)
factor = qcut(arr, [0, 0.25, 0.5, 0.75, 1.0])
expected = qcut(arr, 4)
tm.assert_categorical_equal(factor, expected)
def test_qcut_all_bins_same():
with pytest.raises(ValueError, match="edges.*unique"):
qcut([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)
def test_qcut_include_lowest():
values = np.arange(10)
ii = qcut(values, 4)
ex_levels = IntervalIndex(
[
Interval(-0.001, 2.25),
Interval(2.25, 4.5),
Interval(4.5, 6.75),
Interval(6.75, 9),
]
)
tm.assert_index_equal(ii.categories, ex_levels)
def test_qcut_nas():
arr = np.random.randn(100)
arr[:20] = np.nan
result = qcut(arr, 4)
assert isna(result[:20]).all()
def test_qcut_index():
result = qcut([0, 2], 2)
intervals = [Interval(-0.001, 1), Interval(1, 2)]
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_qcut_binning_issues(datapath):
# see gh-1978, gh-1979
cut_file = datapath(os.path.join("reshape", "data", "cut_data.csv"))
arr = np.loadtxt(cut_file)
result = qcut(arr, 20)
starts = []
ends = []
for lev in np.unique(result):
s = lev.left
e = lev.right
assert s != e
starts.append(float(s))
ends.append(float(e))
for (sp, sn), (ep, en) in zip(
zip(starts[:-1], starts[1:]), zip(ends[:-1], ends[1:])
):
assert sp < sn
assert ep < en
assert ep <= sn
def test_qcut_return_intervals():
ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = qcut(ser, [0, 0.333, 0.666, 1])
exp_levels = np.array(
[Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)]
)
exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize("labels", ["foo", 1, True])
def test_qcut_incorrect_labels(labels):
# GH 13318
values = range(5)
msg = "Bin labels must either be False, None or passed in as a list-like argument"
with pytest.raises(ValueError, match=msg):
qcut(values, 4, labels=labels)
@pytest.mark.parametrize("labels", [["a", "b", "c"], list(range(3))])
def test_qcut_wrong_length_labels(labels):
# GH 13318
values = range(10)
msg = "Bin labels must be one fewer than the number of bin edges"
with pytest.raises(ValueError, match=msg):
qcut(values, 4, labels=labels)
@pytest.mark.parametrize(
"labels, expected",
[
(["a", "b", "c"], Categorical(["a", "b", "c"], ordered=True)),
(list(range(3)), Categorical([0, 1, 2], ordered=True)),
],
)
def test_qcut_list_like_labels(labels, expected):
# GH 13318
values = range(3)
result = qcut(values, 3, labels=labels)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,msg",
[
({"duplicates": "drop"}, None),
({}, "Bin edges must be unique"),
({"duplicates": "raise"}, "Bin edges must be unique"),
({"duplicates": "foo"}, "invalid value for 'duplicates' parameter"),
],
)
def test_qcut_duplicates_bin(kwargs, msg):
# see gh-7751
values = [0, 0, 0, 0, 1, 2, 3]
if msg is not None:
with pytest.raises(ValueError, match=msg):
qcut(values, 3, **kwargs)
else:
result = qcut(values, 3, **kwargs)
expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])
tm.assert_index_equal(result.categories, expected)
@pytest.mark.parametrize(
"data,start,end", [(9.0, 8.999, 9.0), (0.0, -0.001, 0.0), (-9.0, -9.001, -9.0)]
)
@pytest.mark.parametrize("length", [1, 2])
@pytest.mark.parametrize("labels", [None, False])
def test_single_quantile(data, start, end, length, labels):
# see gh-15431
ser = Series([data] * length)
result = qcut(ser, 1, labels=labels)
if labels is None:
intervals = IntervalIndex([Interval(start, end)] * length, closed="right")
expected = Series(intervals).astype(CDT(ordered=True))
else:
expected = Series([0] * length)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser",
[
Series(DatetimeIndex(["20180101", NaT, "20180103"])),
Series(TimedeltaIndex(["0 days", NaT, "2 days"])),
],
ids=lambda x: str(x.dtype),
)
def test_qcut_nat(ser):
# see gh-19768
intervals = IntervalIndex.from_tuples(
[(ser[0] - Nano(), ser[2] - Day()), np.nan, (ser[2] - Day(), ser[2])]
)
expected = Series(Categorical(intervals, ordered=True))
result = qcut(ser, 2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("bins", [3, np.linspace(0, 1, 4)])
def test_datetime_tz_qcut(bins):
# see gh-19872
tz = "US/Eastern"
ser = Series(date_range("20130101", periods=3, tz=tz))
result = | qcut(ser, bins) | pandas.qcut |
"""
Criado em 28/03/2022
por <NAME>
Soluções e Serviços
v1.0.2 => Correção para novo formato de extração do mongocsv
v1.1.0 => Nova funcionalidade de análise de baterias das estações
"""
import os
import sys
import time
import getopt
import pathlib
import xlsxwriter
import pandas as pd
from dask import dataframe as dd
from typing import ContextManager, Optional
from alive_progress import alive_bar
pd.options.mode.chained_assignment = None # default='warn'
path = pathlib.Path().resolve()
os.chdir(f'{path}/data_input')
print("**************************************************************************************")
print("")
print(" Data Aggregator - v1.1.0")
print("")
print(" by <NAME>")
print(" 03/2022")
print("**************************************************************************************")
print("")
def csv_toERPanalysis(file_path):
    with spinner("Aggregating ERP data..."):
        # Extract the data produced by mongocsv
ext = dd.read_csv(f'/{file_path}',dtype={'Value': 'object'})
ext = ext.categorize(columns = ['ERP'])
ext = ext.categorize(columns = ['DateTime'])
ext = ext.categorize(columns = ['Tag'])
ext = ext.compute()
ext['DateTime'] = pd.to_datetime(ext['DateTime'], format='%Y-%m-%dT%H:%M:%S.%f%z')
ext['DateTime'] = ext['DateTime'].dt.date
gb = ext.groupby(['ERP','Tag','DateTime']).agg({'Value': ['count']})
gb_r = gb.reset_index()
gb_r = gb_r.droplevel(1,axis=1)
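        # 288 corresponds to one reading every 5 minutes over a day, so the count below
        # becomes a daily completeness percentage (capped at 100 on the next line)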
gb_r['Value'] = (gb_r['Value']/288)*100
gb_r.loc[gb_r['Value'] > 100,'Value'] = 100
        # All data tabulated, both as raw values and as percentages
alldata_p = gb_r.pivot_table(values = 'Value', index = ['ERP','DateTime'], columns = 'Tag', aggfunc = lambda x: ', '.join(x.astype(str)))
alldata_p = alldata_p.reset_index()
##################################################################################
ext_bateria = ext[ext['Tag'] == "EQPBATT"]
ext_bateria['Value'] = ext_bateria['Value'].str[:2].astype(float)
gb_batt = ext_bateria.groupby(['ERP','Tag','DateTime']).agg({'Value': ['mean']})
gb_rbatt = gb_batt.reset_index()
gb_rbatt = gb_rbatt.droplevel(1,axis=1)
gb_bateria = gb_rbatt[gb_rbatt['Tag'] == 'EQPBATT']
        # All data tabulated, both as raw values and as percentages
alldata_batt = gb_bateria.pivot_table(values = 'Value', index = ['ERP','DateTime'], columns = 'Tag', aggfunc = lambda x: ', '.join(x.astype(str)))
alldata_batt = alldata_batt.reset_index()
os.chdir(f'{path}/data_output')
writer = | pd.ExcelWriter('output.xlsx', engine='xlsxwriter') | pandas.ExcelWriter |
import hashlib
from datetime import datetime
from pathlib import Path
from struct import calcsize
from struct import unpack
import pandas as pd
from pandas import DataFrame
from tqdm import tqdm
from mootdx.consts import MARKET_SH
from mootdx.consts import MARKET_SZ
from mootdx.logger import log
from mootdx.utils.adjust import to_adjust
def get_stock_markets(symbols=None):
results = []
assert isinstance(symbols, list), 'stock code need list type'
if isinstance(symbols, list):
for symbol in symbols:
results.append([get_stock_market(symbol, string=False), symbol.strip('sh').strip('sz')])
return results
def get_stock_market(symbol='', string=False):
""" 判断股票ID对应的证券市场匹配规则
['50', '51', '60', '90', '110'] 为 sh
['00', '12','13', '18', '15', '16', '18', '20', '30', '39', '115'] 为 sz
['5', '6', '9'] 开头的为 sh, 其余为 sz
:param string: False 返回市场ID,否则市场缩写名称
:param symbol: 股票ID, 若以 'sz', 'sh' 开头直接返回对应类型,否则使用内置规则判断
:return 'sh' or 'sz'
"""
assert isinstance(symbol, str), 'stock code need str type'
market = None
if symbol.startswith(('sh', 'sz')):
market = symbol[:2]
elif symbol.startswith(('50', '51', '60', '68', '90', '110', '113', '132', '204')):
market = 'sh'
elif symbol.startswith(('00', '12', '13', '18', '15', '16', '18', '20', '30', '39', '115', '1318')):
market = 'sz'
elif symbol.startswith(('5', '6', '9', '7')):
market = 'sh'
if string is False:
market = MARKET_SZ if market == 'sz' else MARKET_SH
return market
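# Illustrative usage of the helpers above (the symbols are arbitrary examples):
#   get_stock_market('600036', string=True)    -> 'sh'
#   get_stock_market('000001', string=True)    -> 'sz'
#   get_stock_markets(['sh600036', '000001'])  -> [[MARKET_SH, '600036'], [MARKET_SZ, '000001']]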
def gpcw(filepath):
cw_file = open(filepath, 'rb')
header_size = calcsize('<3h1H3L')
stock_item_size = calcsize('<6s1c1L')
data_header = cw_file.read(header_size)
stock_header = unpack('<3h1H3L', data_header)
max_count = stock_header[3]
for idx in range(0, max_count):
cw_file.seek(header_size + idx * calcsize('<6s1c1L'))
si = cw_file.read(stock_item_size)
stock_item = unpack('<6s1c1L', si)
code = stock_item[0].decode()
foa = stock_item[2]
cw_file.seek(foa)
info_data = cw_file.read(calcsize('<264f'))
cw_info = unpack('<264f', info_data)
log.debug(f'{code}, {cw_info}')
return code, cw_info
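# Illustrative only: gpcw() parses an on-disk financial-data file in the packed binary layout
# above and returns the *last* stock record it finds, e.g.
#   code, cw_info = gpcw('gpcw20200930.dat')   # the filename here is just a placeholder
# where cw_info is a tuple of 264 floats for that stock.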
def md5sum(downfile):
"""
文件的 md5 哈希值
:param downfile: 文件路径
:return: mixed
"""
try:
md5_l = hashlib.md5()
md5_l.update(Path(downfile).read_bytes())
return md5_l.hexdigest()
except (IOError, FileNotFoundError) as e:
        log.error(f'Unable to read file: {downfile}')
log.debug(e)
return None
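# Illustrative only, e.g. md5sum('quotes.zip') returns the hex digest of that file,
# or None when the path cannot be read (the filename is just a placeholder).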
def to_data(v, **kwargs):
"""
数值转换为 pd.DataFrame
:param v: mixed
:return: pd.DataFrame
"""
symbol = kwargs.get('symbol')
adjust = kwargs.get('adjust', None)
if adjust in ['01', 'qfq', 'before']:
adjust = 'qfq'
elif adjust in ['02', 'hfq', 'after']:
adjust = 'hfq'
else:
adjust = None
    # empty value
if not v:
return pd.DataFrame(data=[])
# DataFrame
if isinstance(v, DataFrame):
result = v
    # a list
elif isinstance(v, list):
result = | pd.DataFrame(data=v) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Assignment 1
# # Exercise 1 - A
# In[3]:
# 1
import math as mt
r=mt.pi*5**4-mt.sqrt(9)
print(r)
# In[4]:
# 2
r=12-17*(2/7)-9
mt.fabs(r)
# In[5]:
# 3
r=mt.factorial(7)
print(r)
# In[6]:
# 4
mt.log(19,5)
# In[7]:
# 5
mt.log(5,10)
# In[8]:
# 6
mt.e**0.555457
# # Exercise 1 - B
# In[9]:
y=mt.pi
z=4
x=(1+y)/(1+2*z**2)
print(x)
# # Exercise 1 - C
# In[10]:
x=-90
y=mt.pi
z=mt.sqrt(x**2+y**2)
print(z)
# # Exercise 2
# In[11]:
# Part 1
x=[1,-5,31,-1,-9,-1,0,18,90,mt.pi]
y=[1,9,-3,1,-99,-10,2,-11,0,2]
print(x)
print(y)
# In[13]:
#Punto #2
import statistics as st
st.mean(x)
st.pvariance(x)
st.pstdev(x)
# In[14]:
# Part 3
st.mean(y)
st.pvariance(y)
st.pstdev(y)
# In[15]:
# Part 4
import numpy as np
print("El coeficiente de correlación entre x y y es:",np.corrcoef(x,y)[0,1])
# In[16]:
# Part 5
x[2:7]
# In[17]:
# Part 6
y[2:7]
# In[18]:
# Part 7
y[:-4:-1]
# In[19]:
# Part 8
print(x[:-11:-1])
# # Exercise 3
# In[20]:
import pandas as pd
datos = {'Genero': ["M","F","F","F","M","F"],
'Peso': [76,67,55,57,87,48],
'Edad': [25,23,19,18,56,13],
'Nivel Educativo': ["Lic","Bach","Bach","Bach","Dr","MSc"]}
mi_df = | pd.DataFrame(datos) | pandas.DataFrame |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
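# Illustrative only: every validate_* factory in this module follows the same contract -- it
# returns an (ErrorDefinition, checker) pair, and the checker maps table names to the row
# indices that break the rule. A hypothetical way to run a few rules over loaded dataframes:
#   def run_rules(dfs, rule_factories):
#       findings = {}
#       for factory in rule_factories:
#           error, check = factory()
#           findings[error.code] = check(dfs)
#       return findings
#   findings = run_rules(dfs, [validate_165, validate_1014])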
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DECOM', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # OC3
'IN_TOUCH', 'ACTIV', 'ACCOM'], # AD1
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
, # AD1
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
# convert to datetiime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
            # and <DEC> is in <CURRENT_COLLECTION_YEAR>
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
            reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# the logical way is to merge left on UPN but that will be a one to many merge and may not go as well as a many to one merge that we've been doing.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # If <UPN> = 'UN4' then no episode may have <DECOM> >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
# convert DECOM to datetime, drop missing/invalid sort by CHILD then DECOM,
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
            # Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '0'; where <PL> = 'A1', 'A4' or 'A6' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '1'.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
description='Child is aged 17 years or over at the beginning of the year, but an Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
# That is, raise error if collection_start > DOB + 17years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# Where <LS> = ‘J1’ or ‘J2’ or ‘J3’ then <DOB> should <= to 10 years prior to <DECOM>
mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) < merged['DECOM'])
# That is, raise error if DECOM > DOB + 10years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
            # That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
# If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_345():
error = ErrorDefinition(
code='345',
description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
description='A child receiving respite care cannot be in a long-term foster placement ',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
            # the & operator binds tighter than ==, so the brackets are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# Boolean mask
mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
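# --- Illustrative note (assumption, not part of the original module) -------
# Several rules above depend on pd.to_datetime(..., errors='coerce') turning
# unparsable dates into NaT, and on comparisons involving NaT evaluating to
# False, so rows with bad dates silently drop out of the boolean mask.
def _demo_nat_comparison():
    # Throwaway data purely for demonstration; never called by the rules.
    dates = pd.to_datetime(pd.Series(['01/02/2020', 'not a date']),
                           format='%d/%m/%Y', errors='coerce')
    return dates > pd.Timestamp('2019-12-31')  # [True, False]: NaT never compares True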
def validate_563():
error = ErrorDefinition(
code='563',
description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# the maximum date has the highest possibility of satisfying the condition
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
# return {'PrevPerm':prevperm_error_locs}
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
def validate_133():
error = ErrorDefinition(
code='133',
        description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care the data entry must be valid",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
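# --- Illustrative sketch (assumption, not part of the original module) -----
# validate_433 above uses the shift(1) idiom: after sorting by child and date,
# each row is compared with the previous row, and a CHILD equality check guards
# against comparing across different children. A minimal standalone version:
def _demo_previous_row_comparison():
    df = pd.DataFrame({'CHILD': ['A', 'A', 'B'],
                       'DECOM': ['01/01/2020', '01/02/2020', '01/01/2020']})
    df = df.sort_values(['CHILD', 'DECOM'])
    df['PREV_CHILD'] = df['CHILD'].shift(1)
    return df['PREV_CHILD'] == df['CHILD']  # [False, True, False]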
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
            # raise an error if either LA_PERM or DATE_PERM is present, yet PREV_PERM is absent.
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
reason_new_ep = ['S', 'T', 'U']
place_codes = ['A3', 'A5']
mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= max_decom_allowed)) & ~episodes[
'RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. if the REC code is either E11 or E12 then the DATE PLACED CEASED date should not be provided',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce')
episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce')
# drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
episodes_last = episodes_last.dropna(subset=['DECOM'])
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2
error_mask = in_both_years & same_rne & last_year_open & different_pl_dist
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_516():
error = ErrorDefinition(
code='516',
description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year.If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.',
affected_fields=['REC', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6']
rec_codes = ['E45', 'E46']
error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes)
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_511():
error = ErrorDefinition(
code='511',
description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.',
affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1'])
validation_error_mask = mask
validation_error_locations = AD1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_524():
error = ErrorDefinition(
code='524',
description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_441():
error = ErrorDefinition(
code='441',
description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
reviews = reviews.dropna(subset=['REVIEW', 'DOB'])
mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & (
reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_184():
error = ErrorDefinition(
code='184',
description='Date of decision that a child should be placed for adoption is before the child was born.',
        affected_fields=['DATE_PLACED',  # PlacedAdoption
'DOB'], # Header
)
def _validate(dfs):
if 'Header' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
child_record = dfs['Header']
placed_for_adoption = dfs['PlacedAdoption']
all_data = (placed_for_adoption
.reset_index()
.merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A']))
all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce')
all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna()
validation_error = ~mask
validation_error_locations = all_data[validation_error]['index'].unique()
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_612():
error = ErrorDefinition(
code='612',
description="Date of birth field has been completed but mother field indicates child is not a mother.",
affected_fields=['SEX', 'MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
error_mask = (
((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna())
& (header['SEX'].astype(str) == '2')
& header['MC_DOB'].notna()
)
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_552():
"""
This error checks that the first adoption episode is after the last decision !
If there are multiple of either there may be unexpected results !
"""
error = ErrorDefinition(
code="552",
description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.",
# Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types.
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
# get the required datasets
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
# keep index values so that they stay the same when needed later on for error locations
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy()
# find most recent adoption decision
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# remove rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()]
placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True)
last_decision = placed_adoption.loc[placed_adoption_inds]
# first time child started adoption
adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()]
adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True)
# full information of first adoption
first_adoption = adoption_eps.loc[adoption_eps_inds]
# date of decision and date of start of adoption (DECOM) have to be put in one table
merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA'])
# check to see if date of decision to place is less than or equal to date placed.
decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"]
# find the corresponding location of error values per file.
episode_error_locs = merged.loc[decided_after_placed, 'index_EP']
placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA']
return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()}
return error, _validate
def validate_551():
error = ErrorDefinition(
code='551',
description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.',
affected_fields=['DATE_PLACED', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
place_codes = ['A3', 'A4', 'A5', 'A6']
placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'].isna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_557():
error = ErrorDefinition(
code='557',
description="Child for whom the decision was made that they should be placed for adoption has left care " +
"but was not adopted and information on the decision that they should no longer be placed for " +
"adoption items has not been completed.",
affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption
'PLACE', 'LS', 'REC'], # Episodes
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'PlacedAdoption' not in dfs:
return {}
else:
eps = dfs['Episodes']
placed = dfs['PlacedAdoption']
eps = eps.reset_index()
placed = placed.reset_index()
child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])
order_granted = eps['LS'].isin(['D1', 'E1'])
not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna()
placed['ceased_incomplete'] = (
placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna()
)
eps = eps[(child_placed | order_granted) & not_adopted]
eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True)
eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']]
EP_errors = eps['index_EP']
PA_errors = eps['index_PA'].dropna()
return {
'Episodes': EP_errors.to_list(),
'PlacedAdoption': PA_errors.to_list(),
}
return error, _validate
def validate_207():
error = ErrorDefinition(
code='207',
description='Mother status for the current year disagrees with the mother status already recorded for this child.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str)
mother_was_true = header_merged['MOTHER_last'].astype(str) == '1'
error_mask = in_both_years & mother_is_different & mother_was_true
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_523():
error = ErrorDefinition(
code='523',
description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).",
affected_fields=['DATE_PLACED', 'DATE_INT'],
)
def _validate(dfs):
if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs):
return {}
else:
placed_adoption = dfs["PlacedAdoption"]
ad1 = dfs["AD1"]
# keep initial index values to be reused for locating errors later on.
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
# convert to datetime to enable comparison
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y",
errors='coerce')
ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# drop rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()]
ad1 = ad1[ad1["DATE_INT"].notna()]
# bring corresponding values together from both dataframes
merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_AD", "_PA"])
# find error values
different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED']
# map error locations to corresponding indices
pa_error_locations = merged_df.loc[different_dates, 'index_PA']
ad1_error_locations = merged_df.loc[different_dates, 'index_AD']
return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()}
return error, _validate
def validate_3001():
error = ErrorDefinition(
code='3001',
description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
oc3 = dfs['OC3']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB17'] = header['DOB'] + pd.DateOffset(years=17)
oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2'])
age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start)
error_mask = accom_foster & age_17_in_year
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_389():
error = ErrorDefinition(
code='389',
description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB16'] = header['DOB'] + pd.DateOffset(years=16)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7'])
ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC']
error_mask = ceased_asc & ~ceased_over_16
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_387():
error = ErrorDefinition(
code='387',
description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB14'] = header['DOB'] + pd.DateOffset(years=14)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6'])
ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC']
dec_present = episodes_merged['DEC'].notna()
error_mask = ceased_indep & ~ceased_over_14 & dec_present
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_452():
error = ErrorDefinition(
code='452',
description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str)
error_mask = in_both_years & same_rne & last_year_open & different_pl_la
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_386():
error = ErrorDefinition(
code='386',
description='Reason episode ceased is adopted but child has reached age 18.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = (
episodes
.reset_index()
.merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True)
.set_index('index')
.dropna(subset=['DOB18', 'DEC'])
)
ceased_adopted = episodes_merged['REC'].str.upper().astype(str).isin(['E11', 'E12'])
ceased_under_18 = episodes_merged['DOB18'] > episodes_merged['DEC']
error_mask = ceased_adopted & ~ceased_under_18
error_locations = episodes_merged.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_363():
error = ErrorDefinition(
code='363',
description='Child assessment order (CAO) lasted longer than 7 days allowed in the Children Act 1989.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
L2_eps = episodes[episodes['LS'] == 'L3'].copy()
L2_eps['original_index'] = L2_eps.index
L2_eps = L2_eps[L2_eps['DECOM'].notna()]
L2_eps.loc[L2_eps['DEC'].isna(), 'DEC'] = collection_end_str
L2_eps['DECOM'] = pd.to_datetime(L2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.dropna(subset=['DECOM'])
L2_eps['DEC'] = pd.to_datetime(L2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
        L2_eps = L2_eps.sort_values(['CHILD', 'DECOM'])
L2_eps['index'] = pd.RangeIndex(0, len(L2_eps))
L2_eps['index+1'] = L2_eps['index'] + 1
L2_eps = L2_eps.merge(L2_eps, left_on='index', right_on='index+1',
how='left', suffixes=[None, '_prev'])
L2_eps = L2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
L2_eps['new_period'] = (
(L2_eps['DECOM'] > L2_eps['DEC_prev'])
| (L2_eps['CHILD'] != L2_eps['CHILD_prev'])
)
L2_eps['duration'] = (L2_eps['DEC'] - L2_eps['DECOM']).dt.days
L2_eps['period_id'] = L2_eps['new_period'].astype(int).cumsum()
L2_eps['period_duration'] = L2_eps.groupby('period_id')['duration'].transform(sum)
error_mask = L2_eps['period_duration'] > 7
return {'Episodes': L2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
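# --- Illustrative sketch (assumption, not part of the original module) -----
# validate_363 above chains episodes into continuous periods by flagging the
# start of each new period and taking a cumulative sum of that flag, so every
# row in an unbroken run shares a period_id. Minimal standalone version:
def _demo_period_grouping():
    df = pd.DataFrame({'duration': [3, 4, 2],
                       'new_period': [True, False, True]})
    df['period_id'] = df['new_period'].astype(int).cumsum()
    df['period_duration'] = df.groupby('period_id')['duration'].transform('sum')
    return df  # period durations come out as 7, 7, 2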
def validate_364():
error = ErrorDefinition(
code='364',
description='Sections 41-46 of Police and Criminal Evidence (PACE; 1984) severely limits ' +
'the time a child can be detained in custody in Local Authority (LA) accommodation.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
J2_eps = episodes[episodes['LS'] == 'J2'].copy()
J2_eps['original_index'] = J2_eps.index
        J2_eps['DECOM'] = pd.to_datetime(J2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
# Import the libraries needed to use Flask
from flask import Flask, render_template, json, request, flash, session
from flask import redirect, jsonify, make_response
# Import the libraries needed to use the Spotify API
import spotipy.util as util
from spotipy import oauth2
import spotipy
# Import the pandas library
import pandas as pd
# Import the pandas-highcharts library
import pandas_highcharts.core
# Create a Flask instance.
# __name__ is a special variable that takes the value "__main__"
# when you are running the script directly.
app = Flask(__name__)
# Declare a secret key. Anything that requires encryption
# (to protect against tampering by attackers) requires
# the secret key to be configured.
app.secret_key = 'random string'
# Credentials for the Spotify API; they can be obtained from
# https://beta.developer.spotify.com/dashboard by creating an app
username = ''
SPOTIPY_CLIENT_ID = ''
SPOTIPY_CLIENT_SECRET = ''
SPOTIPY_REDIRECT_URI = 'http://localhost:8081/callback'
SCOPE = 'user-read-private user-read-email user-read-playback-state user-read-currently-playing user-library-read user-top-read user-read-recently-played'
CACHE = '.spotipyoauthcache'
# Function that returns the cover URL of the last song the user
# listened to
def read_lastsongimg():
    # sp is a Spotipy instance.
    # For more information about Spotipy, see
    # http://spotipy.readthedocs.io/en/latest/
sp = spotipy.Spotify(auth=session['user'])
results = sp.current_user_recently_played(limit=50)
return results['items'][0]['track']['album']['images'][0]['url']
# Function that returns the name of the last song the user listened to
def read_lastsong():
sp = spotipy.Spotify(auth=session['user'])
results = sp.current_user_recently_played(limit=50)
return results['items'][0]['track']['name']
# Function that returns the cover URL of the last playlist created
# by the user
def read_lastplaylistimg():
sp = spotipy.Spotify(auth=session['user'])
results = sp.current_user_playlists(limit=1)
return results['items'][0]['images'][0]['url']
# Function that returns the name of the last playlist created by the user
def read_lastplaylist():
sp = spotipy.Spotify(auth=session['user'])
results = sp.current_user_playlists(limit=1)
return results['items'][0]['name']
# Function that returns the cover URL of the user's top artist
def read_topartistimg():
sp = spotipy.Spotify(auth=session['user'])
results = sp.current_user_top_artists(limit=1)
return results['items'][0]['images'][0]['url']
# Function that returns the name of the user's top artist
def read_topartist():
sp = spotipy.Spotify(auth=session['user'])
results = sp.current_user_top_artists(limit=1)
return results['items'][0]['name']
# Function that returns the cover URL of the user's top song
def read_topsongimg():
sp = spotipy.Spotify(auth=session['user'])
results = sp.current_user_top_tracks(limit=1)
return results['items'][0]['album']['images'][0]['url']
# Function that returns the name of the user's top song
def read_topsong():
sp = spotipy.Spotify(auth=session['user'])
results = sp.current_user_top_tracks(limit=1)
return results['items'][0]['name']
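# --- Illustrative sketch (assumption, not wired into any route) ------------
# The read_* helpers above all rebuild a Spotify client and differ only in the
# endpoint and the keys they extract; a generic helper like this could remove
# that repetition. It reuses only Spotipy calls already used above.
def read_top_item(kind='tracks'):
    sp = spotipy.Spotify(auth=session['user'])
    if kind == 'artists':
        results = sp.current_user_top_artists(limit=1)
    else:
        results = sp.current_user_top_tracks(limit=1)
    return results['items'][0]['name']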
# This function is mapped to the home URL '/'.
# That means that when the user browses to localhost:8081,
# the home function runs and returns its result to the web page.
@app.route('/')
def main():
    # Check whether a user is logged into the application: if so,
    # show the dashboard, otherwise return the login screen.
if session.get('user'):
        # Use pandas to read a CSV file for use with Highcharts
df = pd.read_csv('csv/test.csv', index_col='Date', parse_dates=True)
        # Build a dataset with pandas-highcharts to send to the web page
dataSet = pandas_highcharts.core.serialize(
df, render_to='my-chart', output_type='json', title='Test')
        # Return the dspotify.html template via Jinja2 with the chart as a JSON parameter
return render_template('dspotify.html', chart=dataSet)
else:
        # If no user is logged in, return the login template
return render_template('login.html')
# This function is mapped to the logout URL '/logout'
@app.route('/logout')
def logout():
    # Remove the user from the session and redirect to the home page
session.pop('user', None)
return redirect('/')
# This function is mapped to the '/callSpotify' URL used to obtain a Spotify
# token, using the POST method
@app.route('/callSpotify', methods=['POST'])
def callSpotify():
    # Redirect the user to authenticate with their Spotify credentials
return redirect("https://accounts.spotify.com/authorize?client_id=23278973c92a4269829378f645f382b2&response_type=code&redirect_uri=http%3A%2F%2Flocalhost%3A8081%2Fcallback&scope=user-library-read+user-read-currently-playing+user-read-email+user-read-playback-state+user-read-private+user-read-recently-played+user-top-read")
# This function is mapped to the Spotify redirect URL registered in the Spotify app
@app.route("/callback")
def callback():
    # The API returns a code
code = request.args['code']
    # If the code is not empty, use OAuth2 to call the Spotify API on every query we make
if len(code) > 0:
if (code != ''):
            # Create an object to authenticate using our Spotify app credentials
spAuth = spotipy.oauth2.SpotifyOAuth(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET,
SPOTIPY_REDIRECT_URI, state=None, scope=SCOPE, cache_path=None, proxies=None)
            # Get the token needed to use the Spotify API
token = spAuth.get_access_token(code)
            # Use the Spotify token to identify the user's session
session['user'] = token['access_token']
            # Once authenticated, redirect the user to the Table page
return redirect('/table')
else:
            # If no code was returned from the Spotify login, redirect the
            # user to the home page and display a floating message using the
            # flash function
flash('Email o Contraseña Incorrectos')
            return redirect('/')
else:
flash('Email o Contraseña Incorrectos')
        return redirect('/')
# This function is mapped to the Table URL '/table', which shows the last 50
# songs the user listened to
@app.route('/table')
def table():
    # If a user is logged in, return a page showing the user's last 50 songs
if session.get('user'):
        # Create a Spotify object with the obtained token to run the queries
sp = spotipy.Spotify(auth=session['user'])
results = sp.current_user_recently_played(limit=50)
items = []
columns = []
        # Build a list of dictionaries from the results obtained
for item in results['items']:
items.append({
"artist": item['track']['artists'][0]['name'],
"song": item['track']['name'],
"uri": """<a href=" """ + item['track']['uri'] + """ ">""" + item['track']['uri'] + """</a>"""
})
        # Build a pandas DataFrame from the list of dictionaries
        df = pd.DataFrame(items)
import pandas as pd
import numpy as np
import sys
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from mytools.myutils import updateFile, inputInteger
# ######################### Plot Functions #####################################
# Distinct color list
colorList = [(0, 0, 0), (230, 25, 75), (60, 180, 75), (255, 225, 25), (0, 130, 200),\
(245, 130, 48), (145, 30, 180), (70, 240, 240), (240, 50, 230),\
(210, 245, 60), (0, 128, 128), (230, 190, 255), (170, 110, 40),\
(255, 250, 200), (128, 0, 0), (170, 255, 195), (128, 128, 0),\
(255, 215, 180), (0, 0, 128), (128, 128, 128), (255, 255, 255)]
colorsArrayNormalized = np.array(colorList)/255
def plot(data, title, dataSource='', pdays=28, kind='line'):
"""Plots the passed dataframe.
kind (str): 'line' or 'bar'
"""
loc = MultipleLocator(1)
if len(data.columns) <= 10:
ax = data.iloc[-pdays:].plot(kind=kind, style='.-', grid=True, title=title)
else:
ax = data.iloc[-pdays:].plot(kind=kind, style='.-', grid=True, title=title, color=colorsArrayNormalized)
ax.xaxis.set_major_locator(loc)
ax.grid(True, which='major')
plt.annotate(dataSource, xy=(0.55,-0.07), xytext=(0.55,-0.07), xycoords='axes fraction',
textcoords='offset points', va='top', fontsize=8)
plt.show()
def plottrend(data, dataTrend, title, dataSource='', pdays=28):
"""Plots a given datafram and its pre-calculated given trend"""
loc = MultipleLocator(1)
ax = data.iloc[-pdays:].plot(kind='line', style='.-', grid=True, title=title)
colors = [line.get_color() for line in ax.get_lines()]
dataTrend.plot(ax=ax, style = '--', color=colors, legend=False)
ax.xaxis.set_major_locator(loc)
ax.grid(True, which='major')
plt.annotate(dataSource, xy=(0.55,-0.07), xytext=(0.55,-0.07), xycoords='axes fraction',
textcoords='offset points', va='top', fontsize=8)
plt.show()
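# Illustrative usage sketch (assumption; the data below is synthetic and this
# helper is not called anywhere in the script): exercising plot() with a small
# date-indexed frame.
def _demo_plot():
    idx = pd.date_range('2020-03-01', periods=10, freq='D')
    demo = pd.DataFrame({'CountryA': np.arange(10), 'CountryB': np.arange(10) * 2},
                        index=idx)
    plot(demo, title='Demo series', dataSource='synthetic data', pdays=10)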
# ################## Getting and processing Data ###############################
# ++++++++++++++++ Population Data +++++++++++++
def processPopulationData(file = './data/population_by_country_2020.csv'):
dfPopulation = pd.read_csv(file)
dfPopulation.rename(columns={'Country (or dependency)' : 'country',
'Population (2020)' : 'population'}, inplace=True)
population = dfPopulation[['country','population']]
population.set_index('country', inplace = True)
# Adding World Total
population = population.append(pd.DataFrame(data = [population.sum()], index=['World']))
# for compatibility with COVID Datafreames
population = population.rename(
index={'United States':'Usa', 'South Korea':'Korea, South', 'Myanmar': 'Burma',
'Czech Republic (Czechia)':'Czechia'})
return population
# +++++++++++++ COVID-19 Data +++++++++++++++++++
def processDf(df):
"""Preprocess the raw dataframe (that results from the .csv import)"""
df.drop(columns=['Province/State','Lat', 'Long'], inplace=True)
df.rename(columns={'Country/Region' : 'region'}, inplace=True)
    df = df.groupby(['region']).sum()  # Sum rows belonging to the same country/region (originally split by province/state)
    df = df.T  # Columns are countries, rows are dates.
# Renaming Index values for consistency with Population Data
df = df.rename(columns={'US':'Usa', 'Taiwan*':'Taiwan'})
df.index = pd.to_datetime(df.index, format="%m/%d/%y")
df['World'] = df.sum(axis=1)
df['WorldExceptChina'] = df['World'] - df['China']
return df
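# Illustrative sketch (assumption, not used below): processDf() yields a
# date-by-country frame and processPopulationData() a country-indexed
# population frame, so per-capita rates can be derived by dividing matching
# columns. Column/index overlap between the two sources is assumed.
def per_100k(df_cases, population):
    common = df_cases.columns.intersection(population.index)
    return df_cases[common] / population.loc[common, 'population'] * 100_000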
def getCovIdData(rootUrl = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master'\
'/csse_covid_19_data/csse_covid_19_time_series/'):
urlConfirmed = rootUrl+'time_series_covid19_confirmed_global.csv'
urlDeaths = rootUrl+'time_series_covid19_deaths_global.csv'
urlRecovered = rootUrl+'time_series_covid19_recovered_global.csv'
# ########## Confirmed #################
updateFile(urlConfirmed, './data/confirmed.csv', mtime = 0.25)
# Passing url directly to pd.read_csv() is also possible and valid,
# but keeping an updated local file and avoid unecessary downloads instead
    dfConfirmed = pd.read_csv('./data/confirmed.csv')
# Run like:
## heroku run --size=performance-l python user_summary.py -r heroku
# Or run in the background like:
## heroku run:detached --size=performance-l python user_summary.py -r heroku
import pandas as pd
import numpy as np
import os
import json
import gspread
from datetime import datetime
from app import get_db_cursor
from package import Package
from hubspot import HubSpot
from intercom import intercom
hs = HubSpot()
hs.companies()
def params_changed(x):
defaults = {
'cost_bigdeal_increase': 5.0,
'cost_alacart_increase': 8.0,
'cost_content_fee_percent': 5.7,
'cost_ill': 17.0,
'ill_request_percent_of_delayed': 5.0,
'weight_citation': 10.0,
'weight_authorship': 100.0,
'include_bronze': True, # bronze OA
'include_social_networks': True, # research gate OA
'include_submitted_version': True, # green OA
}
x_filt = {key: x[key] for key in list(defaults.keys())}
differs_log = defaults != x_filt
diff_dict = {k: x_filt[k] for k in x_filt if k in x_filt and defaults[k] != x_filt[k]}
return differs_log, diff_dict
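# Illustrative check (assumption, not part of the original job): a scenario
# config identical to the defaults except for cost_ill=20.0 would come back
# from params_changed() as (True, {'cost_ill': 20.0}).
def _demo_params_changed():
    cfg = {
        'cost_bigdeal_increase': 5.0, 'cost_alacart_increase': 8.0,
        'cost_content_fee_percent': 5.7, 'cost_ill': 20.0,
        'ill_request_percent_of_delayed': 5.0, 'weight_citation': 10.0,
        'weight_authorship': 100.0, 'include_bronze': True,
        'include_social_networks': True, 'include_submitted_version': True,
    }
    return params_changed(cfg)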
# Get institution ids that have Unsub users w/ permissions access
with get_db_cursor() as cursor:
cmd = """select distinct(ji.id),display_name,created,is_consortium,consortium_id,ror.ror_id
from jump_institution as ji
join jump_user_institution_permission as juip on ji.id=juip.institution_id
join jump_ror_id as ror on ji.id=ror.institution_id
where not ji.is_demo_institution;
"""
cursor.execute(cmd)
rows = cursor.fetchall()
institutions = pd.DataFrame(rows, columns=['institution_id','name','created','is_consortium','consortium_id','ror_id'])
# Consortia
institutions['is_consortium'].fillna(False, inplace=True)
consortia = institutions[institutions['is_consortium']]
## drop demo/test accounts
consortia = consortia[~consortia['name'].str.contains("Demo|Testing")]
# Non-consortia
non_consortia = institutions[~institutions['is_consortium']]
## exclude demo/test institutions
non_consortia = non_consortia[~non_consortia['institution_id'].str.contains("institution-testing")]
non_consortia = non_consortia[~non_consortia['institution_id'].str.contains("institution-demo")]
non_consortia = non_consortia[~non_consortia['name'].str.contains("Demo")]
non_consortia = non_consortia[~non_consortia['name'].str.contains("DEMO")]
non_consortia = non_consortia[~non_consortia['name'].str.contains("Test")]
non_consortia = non_consortia[~non_consortia['name'].str.contains("TEST")]
non_consortia = non_consortia[~non_consortia['name'].str.contains("Scott")]
# Each institution
# institution="institution-jscQRozbejja"
# it = non_consortia[0:20].iterrows()
# row = next(it)[1]
# non_consortia.iterrows()[572]
all_institutions = []
for index, row in non_consortia.iterrows():
print(row["ror_id"])
with get_db_cursor() as cursor:
cmd = "select * from jump_account_package where institution_id = %s"
cursor.execute(cmd, (row["institution_id"],))
rows_inst = cursor.fetchall()
if not rows_inst:
institution_pkgs = pd.DataFrame({"institution_name": row["name"], 'ror_id':row["ror_id"]},
index = [0])
# institution_pkgs["current_deal"] = hs.current_deal(ror_id=row["ror_id"])
# company = hs.filter_by_ror_id(ror_id=row["ror_id"])
else:
institution_pkgs = pd.DataFrame(rows_inst, columns=['account_id','package_id','publisher','package_name','created','con_package_id','institution_id','is_demo','big_deal_cost','is_deleted','updated','default_to_no_perpetual_access','currency','is_dismissed_warning_missing_perpetual_access','is_dismissed_warning_missing_prices','big_deal_cost_increase'])
institution_pkgs.drop(["account_id","con_package_id","is_dismissed_warning_missing_perpetual_access","is_dismissed_warning_missing_prices","default_to_no_perpetual_access","updated"], axis=1, inplace=True)
institution_pkgs["institution_name"] = row["name"]
institution_pkgs["ror_id"] = row["ror_id"]
institution_pkgs["current_deal"] = hs.current_deal(ror_id=row["ror_id"])
company = hs.filter_by_ror_id(ror_id=row["ror_id"])
consortia = None
consortium_account = None
date_last_paid_invoice = None
amount_last_paid_invoice = None
if company:
consortia = company[0].get('consortia')
consortium_account = company[0].get('consortium_account')
dlpi = company[0].get('date_last_paid_invoice')
date_last_paid_invoice = datetime.strptime(dlpi, '%m/%d/%Y').strftime("%Y-%m-%d") if dlpi else None
alpi = company[0].get('amount_last_paid_invoice')
amount_last_paid_invoice = float(alpi) if alpi else None
institution_pkgs["consortia"] = consortia
institution_pkgs["consortium_account"] = consortium_account
institution_pkgs["date_last_paid_invoice"] = date_last_paid_invoice
institution_pkgs["amount_last_paid_invoice"] = amount_last_paid_invoice
institution_pkgs["created_inst"] = row['created'].strftime("%Y-%m-%d")
# intercom
intlastseen = None
emaillastseen = None
with get_db_cursor() as cursor:
cmd = "select * from jump_debug_admin_combo_view where institution_id = %s"
cursor.execute(cmd, (row["institution_id"],))
rows_users = cursor.fetchall()
if rows_users:
emails = list(filter(lambda x: x is not None, [w['email'] for w in rows_users]))
domain = None
if company:
domain = company[0].get('domain')
intlastseen, emaillastseen = intercom(emails, domain)
institution_pkgs["intercom_last_seen"] = intlastseen
institution_pkgs["intercom_last_seen_email"] = emaillastseen
# end intercom
# packages
pkgid = institution_pkgs.get('package_id')
if not isinstance(pkgid, pd.Series):
all_institutions.append(institution_pkgs)
else:
pkg_ids = pkgid.to_list()
pkg_dict_list = []
# This is the slow part: queries for each package
for pkg in pkg_ids:
try:
pkg = Package.query.get(pkg)
mpnum = 0
mp = list(filter(lambda x: x['id'] == "missing_prices", pkg.warnings))
if len(mp):
mpnum = len(mp[0]['journals'])
saved_scenarios = pkg.saved_scenarios
scenario_configs = [params_changed(w.to_dict_definition()['configs']) for w in saved_scenarios]
scenario_dates = [w.created for w in saved_scenarios]
scenario_dates.sort()
pkg_dict_list.append({"package_id":pkg.package_id,
# "created_pkg": pkg.created,
"has_complete_counter_data": pkg.has_complete_counter_data,
"perpetual_access": pkg.data_files_dict['perpetual-access']['is_live'],
"custom_price": pkg.data_files_dict['price']['is_live'],
"missing_prices": mpnum,
"is_feeder_package": pkg.is_feeder_package,
"is_feedback_package": pkg.is_feedback_package,
"scenarios": len(pkg.scenario_ids),
"scenario_user_subrs": any([len(w.to_dict_definition()['subrs']) > 0 for w in pkg.saved_scenarios]),
"scenario_param_chgs": any([x[0] for x in scenario_configs]),
"scenario_param_str": ",".join([str(x[1]) for x in scenario_configs]),
"created_sce_first": min(scenario_dates).strftime("%Y-%m-%d") if scenario_dates else None,
"created_sce_last": max(scenario_dates).strftime("%Y-%m-%d") if scenario_dates else None, })
except Exception as e:
pkg_dict_list.append({})
pkg_details = pd.DataFrame(pkg_dict_list)
all_institutions.append(institution_pkgs.merge(pkg_details, on="package_id"))
# len(all_institutions)
# all_institutions
all_institutions_df = pd.concat(all_institutions)
created_pkg_new = [w.strftime("%Y-%m-%d") if isinstance(w, pd.Timestamp) else w for w in all_institutions_df['created'].to_list()]
del all_institutions_df['created']
all_institutions_df['created_pkg'] = created_pkg_new
all_institutions_df = all_institutions_df[["institution_id","institution_name","ror_id","created_inst","current_deal","consortia","consortium_account","date_last_paid_invoice","amount_last_paid_invoice",
"intercom_last_seen", "intercom_last_seen_email",
"package_id","package_name","created_pkg","publisher","is_deleted",
"currency","big_deal_cost","big_deal_cost_increase","has_complete_counter_data",
"perpetual_access","custom_price","is_feeder_package","is_feedback_package",
"created_sce_first", "created_sce_last",
"scenarios", "scenario_user_subrs", "scenario_param_chgs", "scenario_param_str",]]
pkg_file = 'non_consortia_pkg_level.csv'
all_institutions_df.to_csv(pkg_file, index=False)
# aggregate package level data up to institutions
inst_level = all_institutions_df.copy()
inst_level = inst_level[~inst_level['is_deleted'].fillna(False) & ~inst_level['is_feeder_package'].fillna(False) & ~inst_level['is_feedback_package'].fillna(False)]
inst_level['created_sce_last'] = pd.to_datetime(inst_level['created_sce_last'])
#!/usr/bin/env python
# coding: utf-8
import sys
import gpflow
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition, inset_axes
from sklearn import svm
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import silhouette_score
from fffit.utils import (
shuffle_and_split,
values_real_to_scaled,
values_scaled_to_real,
variances_scaled_to_real,
)
from fffit.plot import (
plot_model_performance,
plot_slices_temperature,
plot_slices_params,
plot_model_vs_test,
)
from fffit.models import run_gpflow_scipy
sys.path.append("../")
from utils.r125 import R125Constants
from utils.id_new_samples import prepare_df_density
R125 = R125Constants()
liquid_density_threshold = 500 # kg/m^3
iternum = 4
csv_path = "/scratch365/rdefever/hfcs-fffit/hfcs-fffit/analysis/csv/"
# Load in all parameter csvs and result csvs
param_csv_names = [
"r125-density-iter" + str(i) + "-params.csv" for i in range(1, iternum + 1)
]
result_csv_names = [
"r125-density-iter" + str(i) + "-results.csv" for i in range(1, iternum + 1)
]
df_params = [
pd.read_csv(csv_path + param_csv_name, index_col=0)
for param_csv_name in param_csv_names
]
df_results = [
pd.read_csv(csv_path + result_csv_name, index_col=0)
for result_csv_name in result_csv_names
]
# Concatenate all parameter sets and results
df_params = pd.concat(df_params).reset_index(drop=True)
df_results = pd.concat(df_results).reset_index(drop=True)
# Create a df with the MSE for each parameter set
# and add the parameter set idx
df_results["expt_density"] = df_results["temperature"].apply(
lambda x: R125.expt_liq_density[int(x)]
)
df_results["sq_err"] = (df_results["density"] - df_results["expt_density"]) ** 2
df_mse = (
df_results.groupby(list(R125.param_names))["sq_err"].mean().reset_index(name="mse")
)
scaled_param_values = values_real_to_scaled(
df_mse[list(R125.param_names)], R125.param_bounds
)
param_idxs = []
param_vals = []
for params1 in scaled_param_values:
for idx, params2 in enumerate(df_params[list(R125.param_names)].values):
if np.allclose(params1, params2):
param_idxs.append(idx)
param_vals.append(params2)
break
df_mse["param_idx"] = param_idxs
df_mse[list(R125.param_names)] = param_vals
# Plot all with MSE < 625
g = seaborn.pairplot(
pd.DataFrame(df_mse[df_mse["mse"] < 625.0], columns=list(R125.param_names))
)
g.set(xlim=(-0.1, 1.1), ylim=(-0.1, 1.1))
g.savefig("R125-all-MSE.lt.625.pdf")
# Plot all with MSE < 100
g = seaborn.pairplot(
pd.DataFrame(df_mse[df_mse["mse"] < 100.0], columns=list(R125.param_names))
)
g.set(xlim=(-0.1, 1.1), ylim=(-0.1, 1.1))
g.savefig("R125-all-MSE.lt.100.pdf")
top_param_set = df_mse[df_mse["mse"] < 625]
# Greedy search to ID top params
distance = 2.3
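# Greedy selection: repeatedly take the remaining parameter set with the lowest MSE, then
# discard every other set whose L1 distance to that pick (in scaled parameter space) is
# below `distance`, so the surviving sets are both low-MSE and mutually well separated.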
final_param_set = pd.DataFrame(columns=top_param_set.columns)
not_final_param_set = pd.DataFrame(columns=top_param_set.columns)
while len(top_param_set) > 0:
top_param_set = top_param_set.sort_values("mse")
final_param_set = final_param_set.append(top_param_set.iloc[[0]])
# Remove anything within distance
l1_norm = np.sum(
np.abs(
top_param_set[list(R125.param_names)].values
- final_param_set[list(R125.param_names)].iloc[[-1]].values
),
axis=1,
)
points_to_remove = np.where(l1_norm < distance)[0]
not_final_param_set = not_final_param_set.append(
top_param_set.iloc[points_to_remove]
)
top_param_set.drop(index=top_param_set.index[points_to_remove], inplace=True)
final_param_set_mse625 = final_param_set
top_param_set = df_mse[df_mse["mse"] < 100]
# Greedy search to ID top params
distance = 2.13
final_param_set = | pd.DataFrame(columns=top_param_set.columns) | pandas.DataFrame |
#Python 2.7.9 (default, Apr 5 2015, 22:21:35)
# the full environment I used to test this is in basic_project_stats.yml
import sys
# file with raw classifications (csv)
# put this way up here so if there are no inputs we exit quickly before even trying to load everything else
try:
classfile_in = sys.argv[1]
except:
print("\nUsage: %s classifications_infile" % sys.argv[0])
print(" classifications_infile is a Zooniverse (Panoptes) classifications data export CSV.\n")
print(" Optional inputs:")
print(" workflow_id=N")
print(" specify the program should only consider classifications from workflow id N")
print(" workflow_version=M")
print(" specify the program should only consider classifications from workflow version M")
print(" (note the program will only consider the major version, i.e. the integer part)")
print(" outfile_csv=filename.csv")
print(" if you want the program to save a sub-file with only classification info from the workflow specified, give the filename here")
print(" --time_elapsed")
print(" specify the program should compute classification durations and total classification work effort")
print(" --remove_duplicates")
print(" remove duplicate classifications (subject-user pairs) before analysis.")
print(" memory-intensive for big files; probably best to pair with outfile_csv so you save the output.")
print(" --keep_nonlive")
print(" by default the program ignores classifications made while the project wasn't 'Live'; setting this will keep them in.")
print(" --keep_allcols")
print(" by default the program only keeps columns required for stats; use this with a specified outfile_csv to save all columns, including annotations. (If you're not using outfile_csv this will just waste memory.)")
print("\nAll output will be to stdout (about 1-2 paragraphs' worth).\n")
sys.exit(0)
import numpy as np # works in 1.10.1
import pandas as pd # works in 0.13.1
import datetime
import dateutil.parser
import json, ujson
import gc
# default value is not to care about workflow ID or version
workflow_id = -1
workflow_version = -1
# by default we won't worry about computing how much time effort the volunteers cumulatively spent
time_elapsed = False
# by default we won't write the subset of classifications we used to a new csv file
output_csv = False
# by default we'll ignore the possibility of duplicate classifications
# note duplicates are relatively rare, usually <2% of all classifications
# the Zooniverse has squashed several bugs related to this, but some still
# happen client-side and there's nothing we can do about that.
remove_duplicates = False
# by default, restrict the analysis to "Live" classifications
keep_nonlive = False
# by default, don't keep every column of the classifications when writing to an outfile
keep_allcols = False
# check for other command-line arguments
if len(sys.argv) > 2:
# if there are additional arguments, loop through them
for i_arg, argstr in enumerate(sys.argv[2:]):
arg = argstr.split('=')
if arg[0] == "workflow_id":
workflow_id = int(arg[1])
elif arg[0] == "workflow_version":
workflow_version = float(arg[1])
elif (arg[0] == "outfile_csv") | (arg[0] == "outfile"):
outfile_csv = arg[1]
output_csv = True
elif arg[0] == "--keep_allcols":
keep_allcols = True
elif arg[0] == "--time_elapsed":
time_elapsed = True
elif arg[0] == "--remove_duplicates":
remove_duplicates = True
elif arg[0] == "--keep_nonlive":
keep_nonlive = True
# columns currently in an exported Panoptes classification file:
# classification_id,user_name,user_id,user_ip,workflow_id,workflow_name,workflow_version,created_at,gold_standard,expert,metadata,annotations,subject_data,subject_ids
# classification_id identifies the specific classification - should be unique for each row in this file
# user_name is either their registered name or "not-logged-in"+their hashed IP
# user_id is their numeric Zooniverse ID or blank if they're unregistered
# user_ip is a hashed version of their IP
# workflow_id is the numeric ID of this workflow, which you can find in the project builder URL for managing the workflow:
# https://www.zooniverse.org/lab/[project_id]/workflow/[workflow_id]/
# workflow_name is the name you gave your workflow (for sanity checks)
# workflow_version is [bigchangecount].[smallchangecount] and is probably pretty big
# created_at is the date the entry for the classification was recorded
# gold_standard is 1 if this classification was done in gold standard mode
# expert is 1 if this classification was done in expert mode... I think
# metadata (json) is the data the browser sent along with the classification.
# Includes browser information, language, started_at and finished_at
# note started_at and finished_at are perhaps the easiest way to calculate the length of a classification
# (the duration elapsed between consecutive created_at by the same user is another way)
# the difference here is back-end vs front-end
# annotations (json) contains the actual classification information
# which for this analysis we will ignore completely, for now
# subject_data is cross-matched from the subjects table and is for convenience in data reduction
# subject_ids has just the subject ids in the given classification
# here we will ignore this too, except to count subjects once.
# we'll also ignore classification_id, user_ip, workflow information, gold_standard, and expert.
#
# Print out the input parameters just as a sanity check
print("Computing project stats using:")
print(" infile: %s" % classfile_in)
#################################################################################
#################################################################################
#################################################################################
# Get the Gini coefficient - https://en.wikipedia.org/wiki/Gini_coefficient
#
# The Gini coefficient measures inequality in distributions of things.
# It was originally conceived for economics (e.g. where is the wealth in a country?
# in the hands of many citizens or a few?), but it's just as applicable to many
# other fields. In this case we'll use it to see how classifications are
# distributed among classifiers.
# G = 0 is a completely even distribution (everyone does the same number of
# classifications), and ~1 is uneven (~all the classifications are done
# by one classifier).
# Typical values of the Gini for healthy Zooniverse projects (Cox et al. 2015) are
# in the range of 0.7-0.9.
# That range is generally indicative of a project with a loyal core group of
# volunteers who contribute the bulk of the classification effort, but balanced
# out by a regular influx of new classifiers trying out the project, from which
# you continue to draw to maintain a core group of prolific classifiers.
# Once your project is fairly well established, you can compare it to past Zooniverse
# projects to see how you're doing.
# If your G is << 0.7, you may be having trouble recruiting classifiers into a loyal
# group of volunteers. People are trying it, but not many are staying.
# If your G is > 0.9, it's a little more complicated. If your total classification
# count is lower than you'd like it to be, you may be having trouble recruiting
# classifiers to the project, such that your classification counts are
# dominated by a few people.
# But if you have G > 0.9 and plenty of classifications, this may be a sign that your
# loyal users are -really- committed, so a very high G is not necessarily a bad thing.
#
# Of course the Gini coefficient is a simplified measure that doesn't always capture
# subtle nuances and so forth, but it's still a useful broad metric.
def gini(list_of_values):
sorted_list = sorted(list_of_values)
height, area = 0, 0
for value in sorted_list:
height += value
area += height - value / 2.
fair_area = height * len(list_of_values) / 2
return (fair_area - area) / fair_area
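# Minimal illustrative sanity check for gini() (toy values, not project data):
# an even distribution gives ~0 and a maximally uneven one approaches 1 as n grows.
if __debug__:
    assert gini([10, 10, 10, 10]) < 1e-9             # everyone classifies equally -> 0.0
    assert abs(gini([0, 0, 0, 100]) - 0.75) < 1e-9   # one classifier does everything (n=4)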
#################################################################################
#################################################################################
#################################################################################
def get_duplicate_ids(grp):
# groupbys and dfs have slightly different indexing and just NOPE
#thegrp = pd.DataFrame(grp)
thegrp = grp
if len(thegrp) == 1:
return
else:
# we have a duplicate set, so return the details
return thegrp
def get_live_project(meta_json):
try:
return meta_json['live_project']
except:
# apparently some subject metadata doesn't have this? dunno?
return False
def get_live_project_incl_missing(meta_json):
try:
return meta_json['live_project']
except:
return -1
# Begin the main stuff
print("Reading classifications from %s" % classfile_in)
#classifications = pd.read_csv(classfile_in)
# the above will work but uses a LOT of memory for projects with > 1 million
# classifications. Nothing here uses the actual classification data so don't read it
'''
If you are using this code on an older project, where the data export is from
before subject_ids were exported as their own column, change "subject_id" below
to "subject_data", and then when you define the groupby "by_subject" and count
subjects, you'll need to use subject_data instead of subject_ids.
Apologies for doing this, but subject_data contains the whole manifest so for
big projects with big catalogs it can take up a lot of memory, so we don't want to
use it if we don't have to.
'''
cols_keep = ["classification_id", "user_name", "user_id", "user_ip", "workflow_id", "workflow_version", "created_at", "metadata", "subject_ids"]
if not keep_allcols:
try:
classifications = pd.read_csv(classfile_in, usecols=cols_keep)
except:
print("Some columns missing from classifications infile, reading without specifying columns (uses more memory)... ")
classifications = pd.read_csv(classfile_in)
else:
try:
classifications = pd.read_csv(classfile_in, low_memory=False)
except:
classifications = pd.read_csv(classfile_in)
cols_used = classifications.columns.tolist()
cols_out = classifications.columns.tolist()
if not 'created_day' in cols_used:
cols_used.append('created_day')
if not 'meta_json' in cols_used:
cols_used.append('meta_json')
n_class_raw = len(classifications)
# now restrict classifications to a particular workflow id/version if requested
if (workflow_id > 0) | (workflow_version > 0):
# only keep the stuff that matches these workflow properties
if (workflow_id > 0):
print("Considering only workflow id %d" % workflow_id)
in_workflow = classifications.workflow_id == workflow_id
else:
# the workflow id wasn't specified, so just make an array of true
in_workflow = np.array([True for q in classifications.workflow_id])
if (workflow_version > 0):
classifications['version_int'] = [int(q) for q in classifications.workflow_version]
print("Considering only major workflow version %d" % int(workflow_version))
# we only care about the major workflow version, not the minor version
in_version = classifications.version_int == int(workflow_version)
else:
in_version = np.array([True for q in classifications.workflow_version])
if (sum(in_workflow & in_version) == 0):
print("ERROR: your combination of workflow_id and workflow_version does not exist!\nIgnoring workflow id/version request and computing stats for ALL classifications instead.")
#classifications = classifications_all
else:
# select the subset of classifications
classifications = classifications[in_workflow & in_version]
del in_workflow
del in_version
else:
# just use everything
#classifications = classifications_all
workflow_ids = classifications.workflow_id.unique()
# this takes too much CPU time just for a print statement. Just use float versions
#classifications['version_int'] = [int(q) for q in classifications.workflow_version]
version_ints = classifications.workflow_version.unique()
print("Considering all classifications in workflow ids:")
print(workflow_ids)
print(" and workflow_versions:")
print(version_ints)
# Remove classifications collected before the project went Live
# note: it makes logical sense to do this *before* we extract the classifications
# from the workflow we care about, *but* the meta_json setting step (which we
# need in order to extract Live project status) can take a while (up to ~minutes)
# and adds to memory usage, so I'd rather do it after we've already culled
# the table of potentially a lot of unused rows.
# OTOH culling duplicates takes more time and memory than culling unused workflow
# versions, so wait to do that until after we've removed non-Live classifications
# first, extract the metadata column into a json we can read entries for
#
# ujson is quite a bit faster than json but seems to use a bit more memory as it works
classifications['meta_json'] = [ujson.loads(q) for q in classifications.metadata]
if keep_nonlive:
print("Retaining all non-live classifications in analysis.")
else:
# would that we could just do q['live_project'] but if that tag is missing for
# any classifications (which it is in some cases) it crashes
classifications['live_project'] = [get_live_project(q) for q in classifications.meta_json]
# if this line gives you an error you've read in this boolean as a string
# so need to convert "True" --> True and "False" --> False
class_live = classifications[classifications.live_project].copy()
n_class_thiswf = len(classifications)
n_live = sum(classifications.live_project)
n_notlive = n_class_thiswf - n_live
print(" Removing %d non-live classifications..." % n_notlive)
# don't make a slice but also save memory
classifications = pd.DataFrame(class_live)
del class_live
gc.collect()
# if we've been asked to remove duplicates, do that now
if remove_duplicates:
'''
a duplicate can be that the classification id is submitted twice by the client
but it can also be that the classifier classified the same subject twice in different classification_ids.
So identify duplicates based on username + subject id + workflow info, not based on classification_id.
'''
subj_classifications = classifications.groupby('user_name subject_ids workflow_id workflow_version'.split())
n_class = len(classifications)
# just take the first of each of the groups
classifications_nodups = subj_classifications.head(1)
n_class_nodup = len(classifications_nodups)
n_dups = n_class - n_class_nodup
if n_dups == 0:
print("Searched for duplicate classifications; none found.")
else:
duplicate_outfile = classfile_in.replace(".csv", "_duplicated_only.csv")
if duplicate_outfile == classfile_in:
duplicate_outfile += "_duplicated_only.csv"
print("Found %d duplicate classifications (%.2f percent of total)." % (n_dups, float(n_dups)/float(n_class)*100.0))
# get the duplicate classifications and save them before we remove them
#class_dups = pd.DataFrame(subj_classifications.apply(get_duplicate_ids))
# if you want to keep a record of everything with just the dups flagged,
# this is your thing
#dups_flagged = pd.merge(classifications, classifications_nodups['classification_id subject_id'.split()], how='outer', on='classification_id', suffixes=('', '_2'), indicator=True)
# if you just need something that has only the dups in it, here you go
dups_only = classifications[~classifications.isin(classifications_nodups)].dropna(how='all')
# dups_only has the duplicates only - not the original classification in each set
# i.e. if classifications 123, 456, and 789 are all from the same user
# classifying the same subject, dups_only will only contain classifications
# 456 and 789. When we save the duplicate classifications we want to save
# the initial classification (that was later duplicated) as well, so we
# need to retrieve those.
# I don't see a really easy way to do it based on the groupby we already did
# (subj_classifications)
# so let's just define what identifies the duplicate (user_name + subject_ids)
# and pick them out.
# even for a reasonably big dataset this is relatively fast (seconds, not minutes)
try:
dups_only['user_subj_pair'] = dups_only['user_name']+'_'+dups_only['subject_ids'].astype(int).astype(str)+'_'+dups_only['workflow_id'].astype(str)+'v'+dups_only['workflow_version'].astype(str)
except:
dups_only['user_subj_pair'] = dups_only['user_name']+'_'+dups_only['subject_ids'].astype(str)+'_'+dups_only['workflow_id'].astype(str)+'v'+dups_only['workflow_version'].astype(str)
# n_dup_pairs tracks unique user-subject pairs that were duplicated
dup_pairs = dups_only['user_subj_pair'].unique()
n_dup_pairs = len(dup_pairs)
try:
classifications['user_subj_pair'] = classifications['user_name']+'_'+classifications['subject_ids'].astype(int).astype(str)+'_'+classifications['workflow_id'].astype(str)+'v'+classifications['workflow_version'].astype(str)
except:
classifications['user_subj_pair'] = classifications['user_name']+'_'+classifications['subject_ids'].astype(str)+'_'+classifications['workflow_id'].astype(str)+'v'+classifications['workflow_version'].astype(str)
# this keeps things that are any part of a duplicate set, including first
is_a_dup = classifications['user_subj_pair'].isin(dup_pairs)
class_dups = classifications[is_a_dup].copy()
# counts any classification that is any part of a duplicate set
n_partofdup = len(class_dups)
class_dups.to_csv(duplicate_outfile)
#print(class_dups.head(3))
# now throw away the duplicates (but keep the first of each set) from
# the main classifications table
classifications = pd.DataFrame(classifications_nodups)
del class_dups
del is_a_dup
print("Duplicates removed from analysis (%d unique user-subject-workflow groups)." % n_dup_pairs)
del subj_classifications
del classifications_nodups
gc.collect()
classifications['created_day'] = [q[:10] for q in classifications.created_at]
first_class_day = min(classifications.created_day).replace(' ', '')
last_class_day = max(classifications.created_day).replace(' ', '')
# save processing time and memory in the groupby.apply(); only keep the columns we're going to use or want to save
if output_csv:
if not keep_allcols:
# if we'll be writing to a file at the end of this we need to save a few extra columns
cols_used = ["classification_id", "user_name", "user_id", "user_ip", "created_at", "created_day", "metadata", "meta_json", "subject_ids", "workflow_id", "workflow_version"]
else:
if not keep_allcols:
cols_used = ["classification_id", "user_name", "user_id", "user_ip", "created_at", "created_day", "meta_json", "subject_ids"]
classifications = classifications[cols_used]
# collect() calls PyInt_ClearFreeList(), so explicitly helps free some active memory
gc.collect()
# grab the subject counts
n_subj_tot = len(classifications.subject_ids.unique())
by_subject = classifications.groupby('subject_ids')
subj_class = by_subject.created_at.aggregate('count')
# basic stats on how classified the subjects are
subj_class_mean = np.mean(subj_class)
subj_class_med = np.median(subj_class)
subj_class_min = np.min(subj_class)
subj_class_max = np.max(subj_class)
# free up some memory - note calling this does take CPU time but
# can free up GBs of active memory for big classification files
del by_subject
gc.collect()
# index by created_at as a timeseries
# note: this means things might not be uniquely indexed
# but it makes a lot of things easier and faster.
# update: it's not really needed in the main bit, but will do it on each group later.
#classifications.set_index('created_at_ts', inplace=True)
# get some user information
all_users = classifications.user_name.unique()
by_user = classifications.groupby('user_name')
# also count IP addresses
n_ip = len(classifications.user_ip.unique())
# get total classification and user counts
n_class_tot = len(classifications)
n_users_tot = len(all_users)
unregistered = [q.startswith("not-logged-in") for q in all_users]
n_unreg = sum(unregistered)
n_reg = n_users_tot - n_unreg
is_unreg_class = [q.startswith("not-logged-in") for q in classifications.user_name]
n_unreg_class = sum(is_unreg_class)
n_reg_class = n_class_tot - n_unreg_class
# for the leaderboard, which I recommend project builders never make public because
# Just Say No to gamification
# But it's still interesting to see who your most prolific classifiers are, and
# e.g. whether they're also your most prolific Talk users
nclass_byuser = by_user.created_at.aggregate('count')
nclass_byuser_ranked = nclass_byuser.copy()
nclass_byuser_ranked.sort_values(inplace=True, ascending=False)
# rename the columns properly so they'll print as useful csv headers
nclass_byuser_ranked.name = 'user_name'
nc = pd.DataFrame(nclass_byuser_ranked)
nc.columns = ['n_class']
# write this to a file, so you don't have to re-calculate it later
nclass_byuser_outfile = classfile_in.replace(".csv", "_nclass_byuser_ranked.csv")
# don't accidentally overwrite the classifications file just because someone
# renamed it to not end in .csv
if nclass_byuser_outfile == classfile_in:
nclass_byuser_outfile = "project_nclass_byuser_ranked.csv"
nc.to_csv(nclass_byuser_outfile)
# very basic stats
nclass_med = np.median(nclass_byuser)
nclass_mean = np.mean(nclass_byuser)
# Gini coefficient - see the comments above the gini() function for more notes
nclass_gini = gini(nclass_byuser)
print("\nOverall:\n\n%d classifications of %d subjects by %d classifiers," % (n_class_tot,n_subj_tot,n_users_tot))
print("%d logged in and %d not logged in, from %d unique IP addresses." % (n_reg,n_unreg,n_ip))
print("%d classifications were from logged-in users, %d from not-logged-in users.\n" % (n_reg_class, n_unreg_class))
print("That's %.2f classifications per subject on average (median = %.1f)." % (subj_class_mean, subj_class_med))
print("The most classified subject has %d classifications; the least-classified subject has %d.\n" % (subj_class_max,subj_class_min))
print("Median number of classifications per user: %.2f" %nclass_med)
print("Mean number of classifications per user: %.2f" % nclass_mean)
print("\nTop 10 most prolific classifiers:")
print(nclass_byuser_ranked.head(10))
print("\n\nGini coefficient for classifications by user: %.2f" % nclass_gini)
print("\nClassifications were collected between %s and %s." % (first_class_day, last_class_day))
print("The highest classification id considered here is %d.\n" % max(classifications.classification_id))
# if the input specified we should compute total time spent by classifiers, compute it
if time_elapsed:
# free up some memory
# do this inside the if because if we're not computing times then the program
# is about to end so this memory will be freed up anyway
del unregistered
del by_user
gc.collect()
classifications['started_at_str'] = [q['started_at'].replace('T',' ').replace('Z', '') for q in classifications.meta_json]
classifications['finished_at_str'] = [q['finished_at'].replace('T',' ').replace('Z', '') for q in classifications.meta_json]
sa_temp = classifications['started_at_str']
fa_temp = classifications['finished_at_str']
#print("Creating timeseries...")#,datetime.datetime.now().strftime('%H:%M:%S.%f')
try:
classifications['started_at'] = pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S.%f')
except Exception as the_error:
print("Oops:\n%s" % the_error)
try:
classifications['started_at'] = pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S %Z')
except Exception as the_error:
print("Oops:\n%s" % the_error)
classifications['started_at'] = pd.to_datetime(sa_temp)
try:
classifications['finished_at'] = pd.to_datetime(fa_temp, format='%Y-%m-%d %H:%M:%S.%f')
except Exception as the_error:
print("Oops:\n%s" % the_error)
try:
classifications['finished_at'] = | pd.to_datetime(fa_temp, format='%Y-%m-%d %H:%M:%S %Z') | pandas.to_datetime |
# Pachages
import json
import re
import pandas as pd
from make_plot import df, return_graphs
# sklearn
from sklearn.externals import joblib
from sklearn.base import BaseEstimator, TransformerMixin
# Ploty
import plotly
# flask
from flask import Flask
from flask import render_template, request
# nltk
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
nltk.download(['punkt', 'wordnet', 'stopwords',
'averaged_perceptron_tagger'])
app = Flask(__name__)
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
""" check if sentence start with a verb
"""
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
if len(sentence_list) != 0:
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
if len(pos_tags) != 0:
first_word, first_tag = pos_tags[0]
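                    # 'RT' is the retweet marker, treated here the same as a leading verb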
if first_tag in ['VB', 'VBP', 'VBG'] or first_word == 'RT':
return True
return False
else:
return False
def fit(self, x, y=None):
return self
def transform(self, X):
X_tagged = | pd.Series(X) | pandas.Series |
##**************************************************************************************##
## Step1. Load Packages and Input Data ##
##**************************************************************************************##
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm,metrics
from sklearn.svm import SVC,LinearSVC
from sklearn.model_selection import KFold,StratifiedKFold
from sklearn.metrics import matthews_corrcoef,auc, roc_curve,plot_roc_curve, plot_precision_recall_curve,classification_report, confusion_matrix,average_precision_score, precision_recall_curve
from pandas.core.frame import DataFrame
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import imblearn
from collections import Counter
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
############################# Step2: input data processing #####################
## giessen data
gi_data = np.load("/gi_CIP_FCGR200/alt_cnn_input.npy")
gi_pheno = pd.read_csv("CIP_gi_pheno.csv",index_col=0)
gi_data.shape,gi_pheno.shape
gi_data2 = gi_data.reshape(900,40000)
gi_pheno2 = gi_pheno.values
gi_pheno3 = gi_pheno2.reshape(900,)
gi_data2.shape,gi_pheno3.shape
X = gi_data2
y = gi_pheno3
X.shape,y.shape
## pubdata
pub_data = np.load("/pub_CIP_FCGR200/alt_cnn_input.npy")
pub_pheno = pd.read_csv("CIP_pub_pheno.csv",index_col=0)
pub_data.shape
pub_data2 = pub_data.reshape(1496,40000)
pub_pheno2 = pub_pheno.values
pub_pheno3 = pub_pheno2.reshape(1496,)
pub_data2.shape,pub_pheno3.shape
x_test = pub_data2
y_test = pub_pheno3
undersample = RandomUnderSampler(sampling_strategy='majority')
pub_x_under,pub_y_under=undersample.fit_resample(pub_data2,pub_pheno3)
print(Counter(pub_y_under))
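# sampling_strategy='majority' randomly drops samples from the majority class only, so the
# resampled external test set is balanced between the two classes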
##**************************************************************************************##
## Step2. Training and evaluation of RF,LR, SVM ##
##**************************************************************************************##
## cross validation
cv = StratifiedKFold(n_splits=5)
rf = RandomForestClassifier(n_estimators=200, random_state=0)
lr = LogisticRegression(solver = 'lbfgs',max_iter=1000)
svm = SVC(kernel='linear', probability=True)
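# note: this rebinds the name `svm` (the sklearn module imported above) to the SVC instance;
# everything below refers to the classifier object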
##*************** F1 + ROC curve
rf_tprs = []
rf_prs = []
rf_roc_aucs = []
rf_pr_aucs = []
rf_f1_matrix_out = []
rf_f1_report_out = []
rf_MCC_out = []
rf_pred_cls_out = []
rf_pred_prob_out = []
rf_y_test_out = []
rf_mean_fpr = np.linspace(0, 1, 100)
rf_mean_recall = np.linspace(0, 1, 100)
## LR
lr_tprs = []
lr_prs = []
lr_roc_aucs = []
lr_pr_aucs = []
lr_f1_matrix_out = []
lr_f1_report_out = []
lr_MCC_out = []
lr_pred_cls_out = []
lr_pred_prob_out = []
lr_y_test_out = []
lr_mean_fpr = np.linspace(0, 1, 100)
lr_mean_recall = np.linspace(0, 1, 100)
## SVM
svm_tprs = []
svm_prs = []
svm_roc_aucs = []
svm_pr_aucs = []
svm_f1_matrix_out = []
svm_f1_report_out = []
svm_MCC_out = []
svm_pred_cls_out = []
svm_pred_prob_out = []
svm_y_test_out = []
svm_mean_fpr = np.linspace(0, 1, 100)
svm_mean_recall = np.linspace(0, 1, 100)
fig,[ax1,ax2,ax3] = plt.subplots(nrows=1,ncols=3,figsize=(15, 4))
for i, (train, test) in enumerate(cv.split(X, y)):
## train the new model
rf.fit(X[train], y[train])
## roc curve
rf_viz = plot_roc_curve(rf, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax1)
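    ## interpolate this fold's TPR onto the common mean_fpr grid so the per-fold ROC curves
    ## can be combined on one grid; pinning the first point to 0 starts every curve at the origin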
rf_interp_tpr = np.interp(rf_mean_fpr, rf_viz.fpr, rf_viz.tpr)
rf_interp_tpr[0] = 0.0
rf_tprs.append(rf_interp_tpr)
rf_roc_aucs.append(rf_viz.roc_auc)
## evaluation metrics
rf_pred_cls = rf.predict(X[test])
rf_pred_prob = rf.predict_proba(X[test])[:,1]
rf_f1_matrix = confusion_matrix(y[test],rf_pred_cls)
rf_f1_report = classification_report(y[test],rf_pred_cls)
rf_MCC = matthews_corrcoef(y[test],rf_pred_cls)
### save evalu_metrics out
rf_pred_cls_out.append(rf_pred_cls)
rf_pred_prob_out.append(rf_pred_prob)
rf_f1_matrix_out.append(rf_f1_matrix)
rf_f1_report_out.append(rf_f1_report)
rf_MCC_out.append(rf_MCC)
rf_y_test_out.append(y[test])
## LR
lr.fit(X[train], y[train])
## roc curve
lr_viz = plot_roc_curve(lr, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax2)
lr_interp_tpr = np.interp(lr_mean_fpr, lr_viz.fpr, lr_viz.tpr)
lr_interp_tpr[0] = 0.0
lr_tprs.append(lr_interp_tpr)
lr_roc_aucs.append(lr_viz.roc_auc)
## evaluation metrics
lr_pred_cls = lr.predict(X[test])
lr_pred_prob = lr.predict_proba(X[test])[:,1]
lr_f1_matrix = confusion_matrix(y[test],lr_pred_cls)
lr_f1_report = classification_report(y[test],lr_pred_cls)
lr_MCC = matthews_corrcoef(y[test],lr_pred_cls)
### save evalu_metrics out
lr_pred_cls_out.append(lr_pred_cls)
lr_pred_prob_out.append(lr_pred_prob)
lr_f1_matrix_out.append(lr_f1_matrix)
lr_f1_report_out.append(lr_f1_report)
lr_MCC_out.append(lr_MCC)
lr_y_test_out.append(y[test])
## SVM
svm.fit(X[train], y[train])
## roc curve
svm_viz = plot_roc_curve(svm, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax3)
svm_interp_tpr = np.interp(svm_mean_fpr, svm_viz.fpr, svm_viz.tpr)
svm_interp_tpr[0] = 0.0
svm_tprs.append(svm_interp_tpr)
svm_roc_aucs.append(svm_viz.roc_auc)
## evaluation metrics
svm_pred_cls = svm.predict(X[test])
svm_pred_prob = svm.predict_proba(X[test])[:,1]
svm_f1_matrix = confusion_matrix(y[test],svm_pred_cls)
svm_f1_report = classification_report(y[test],svm_pred_cls)
svm_MCC = matthews_corrcoef(y[test],svm_pred_cls)
### save evalu_metrics out
svm_pred_cls_out.append(svm_pred_cls)
svm_pred_prob_out.append(svm_pred_prob)
svm_f1_matrix_out.append(svm_f1_matrix)
svm_f1_report_out.append(svm_f1_report)
svm_MCC_out.append(svm_MCC)
svm_y_test_out.append(y[test])
#### save predit_prob out
np.save("CIP_gi_FCGR_RF_y_pred_prob_out.npy",rf_pred_prob_out)
np.save("CIP_gi_FCGR_RF_y_test_out.npy",rf_y_test_out)
np.save("CIP_gi_FCGR_LR_y_pred_prob_out.npy",lr_pred_prob_out)
np.save("CIP_gi_FCGR_LR_y_test_out.npy",lr_y_test_out)
np.save("CIP_gi_FCGR_SVM_y_pred_prob_out.npy",svm_pred_prob_out)
np.save("CIP_gi_FCGR_SVM_y_test_out.npy",svm_y_test_out)
#### evaluation
rf_eva_pred_prob = rf.predict_proba(pub_data2)[:,1]
lr_eva_pred_prob = lr.predict_proba(pub_data2)[:,1]
svm_eva_pred_prob = svm.predict_proba(pub_data2)[:,1]
np.save("CIP_FCGR_RF_test_y_pred_prob.npy",rf_eva_pred_prob)
np.save("CIP_FCGR_LR_test_y_pred_prob.npy",lr_eva_pred_prob)
np.save("CIP_FCGR_SVM_test_y_pred_prob.npy",svm_eva_pred_prob)
np.save("CIP_FCGR_test_y_out.npy",pub_pheno3)
#### evaluation for under sample
#pub_x_under,pub_y_under
rf_eva_under_pred_prob = rf.predict_proba(pub_x_under)[:,1]
lr_eva_under_pred_prob = lr.predict_proba(pub_x_under)[:,1]
svm_eva_under_pred_prob = svm.predict_proba(pub_x_under)[:,1]
##**************************************************************************************##
## Step3. Training and evaluation of CNN ##
##**************************************************************************************##
############################# Step1: load pacakge #####################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from keras.utils import to_categorical
from keras.models import Sequential
from tensorflow.keras import activations
from sklearn.model_selection import KFold,StratifiedKFold
from keras.layers import Dense,Dropout, Flatten, Conv1D, Conv2D, MaxPooling1D,MaxPooling2D
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras.layers import BatchNormalization
############################# Step2: load metrics function #####################
### F1 score, precision, recall and accuracy metrics
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
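# F1 is the harmonic mean of precision and recall, e.g. precision 0.8 and recall 0.5 give
# F1 = 2*(0.8*0.5)/(0.8+0.5) ~ 0.62; K.epsilon() only guards against division by zero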
############################# Step3: input data processing #####################
X.shape,y.shape,pub_data2.shape,pub_pheno3.shape
#((900, 40000),(900,), (1496, 40000), (1496,))
x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=123)
x_train.shape,x_test.shape,y_train.shape,y_test.shape
#((720, 40000), (180, 40000), (720,), (180,))
inputs = x_train.reshape(720,200,200,1)
inputs = inputs.astype('float32')
targets = to_categorical(y_train)
inputs.shape,targets.shape
x_test2 = x_test.reshape(180,200,200,1)
x_test2 = x_test2.astype('float32')
y_test2 = to_categorical(y_test)
pub_x_test = pub_data2.reshape(1496,200,200,1)
pub_x_test = pub_x_test.astype('float32')
pub_y_test = pub_pheno3
############################# Step4: model training #####################
batch_size = 8
no_classes = 2
no_epochs = 50
verbosity = 1
num_folds = 5
# Define the K-fold Cross Validator
kfold = KFold(n_splits=num_folds, shuffle=True)
# K-fold Cross Validation model evaluation
fold_no = 1
model_history=[]
for train, test in kfold.split(inputs, targets):
model = Sequential()
model.add(Conv2D(filters=8, kernel_size=3,activation='relu', input_shape=(200,200,1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=8, kernel_size=3, padding='same', activation='relu'))
#model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2)))
model.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
#model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(2,activation='softmax'))
# Compile the model
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['acc',f1_m,precision_m, recall_m])
# Generate a print
print('--------------------------------')
print(f'Training for fold {fold_no} ...')
## checkpoint for saving model
filepath="CIP_gi_FCGR_CNN_weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True,mode='max')
callbacks_list = [checkpoint]
# Fit data to model
train_model = model.fit(inputs[train], targets[train],batch_size=batch_size,epochs=no_epochs,callbacks=callbacks_list,verbose=verbosity,validation_data=(inputs[test], targets[test]))
model_history.append(train_model.history)
# Increase fold number
fold_no = fold_no + 1
########## (2) save model
model.save_weights('CIP_gi_FCGR_CNN.model.h5')
# save model history
from pandas.core.frame import DataFrame
model_out = | DataFrame(model_history) | pandas.core.frame.DataFrame |
import pandas as pd
import numpy as np
import os
import sqlalchemy
import data_functions as datfunc
import utility_functions as utilfunc
import agent_mutation
from agents import Agents, Solar_Agents
from pandas import DataFrame
import json
# Load logger
logger = utilfunc.get_logger()
#%%
def check_table_exists(schema, table, con):
"""
Checks if table exists in schema
Parameters
----------
**schema** : 'SQL schema'
SQL schema in which to check if given table exists
**table** : 'SQL table'
SQL table to be searched
**con** : 'SQL connection'
SQL connection to connect to database
Returns
-------
**True or False** : 'bool'
Returns True if table exists in schema.
"""
sql = """SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema = '{}' AND table_name = '{}');""".format(schema, table)
return pd.read_sql(sql, con).values[0][0]
def get_psql_table_fields(engine, schema, name):
"""
Creates numpy array of columns from specified schema and table
Parameters
----------
**engine** : 'SQL engine'
SQL engine to intepret SQL query
**schema** : 'SQL schema'
SQL schema to pull table from
**name** : 'string'
Name of the table from which fields are retrieved
Returns
-------
numpy array : 'np.array'
Numpy array of columns
"""
sql = "SELECT column_name FROM information_schema.columns WHERE table_schema = '{}' AND table_name = '{}'".format(schema, name)
return np.concatenate(pd.read_sql_query(sql, engine).values)
def df_to_psql(df, engine, schema, owner, name, if_exists='replace', append_transformations=False):
"""
Uploads dataframe to database
Parameters
----------
**df** : 'pd.df'
Dataframe to upload to database
**engine** : 'SQL table'
SQL engine to intepret SQL query
**schema** : 'SQL schema'
Schema in which to upload df
**owner** : 'string'
Owner of schema
**name** : 'string'
Name to be given to table that is uploaded
**if_exists** : 'replace or append'
If table exists and if if_exists set to replace, replaces table in database. If table exists and if if_exists set to append, appendss table in database.
**append_transformations** : 'bool'
        If True, complex-typed columns (lists, dicts, arrays, intervals, DataFrames) are serialized into new columns suffixed with the original dtype name and the originals are dropped; if False, those columns are serialized in place.
Returns
-------
**df** : 'pd.df'
Dataframe that was uploaded to database
"""
d_types = {}
transform = {}
f_d_type = {}
sql_type = {}
delete_list = []
orig_fields = df.columns.values
df.columns = [i.lower() for i in orig_fields]
for f in df.columns:
df_filter = pd.notnull(df[f]).values
if sum(df_filter) > 0:
f_d_type[f] = type(df[f][df_filter].values[0]).__name__.lower()
if f_d_type[f][0:3].lower() == 'int':
sql_type[f] = 'INTEGER'
if f_d_type[f][0:5].lower() == 'float':
d_types[f] = sqlalchemy.types.NUMERIC
sql_type[f] = 'NUMERIC'
if f_d_type[f][0:3].lower() == 'str':
sql_type[f] = 'VARCHAR'
if f_d_type[f] == 'list':
d_types[f] = sqlalchemy.types.ARRAY(sqlalchemy.types.STRINGTYPE)
transform[f] = lambda x: json.dumps(x)
sql_type[f] = 'VARCHAR'
if f_d_type[f] == 'ndarray':
d_types[f] = sqlalchemy.types.ARRAY(sqlalchemy.types.STRINGTYPE)
transform[f] = lambda x: json.dumps(list(x))
sql_type[f] = 'VARCHAR'
if f_d_type[f] == 'dict':
d_types[f] = sqlalchemy.types.STRINGTYPE
transform[f] = lambda x: json.dumps(
dict([(k_v[0], list(k_v[1])) if (type(k_v[1]).__name__ == 'ndarray') else (k_v[0], k_v[1]) for k_v in list(x.items())]))
sql_type[f] = 'VARCHAR'
if f_d_type[f] == 'interval':
d_types[f] = sqlalchemy.types.STRINGTYPE
transform[f] = lambda x: str(x)
sql_type[f] = 'VARCHAR'
if f_d_type[f] == 'dataframe':
d_types[f] = sqlalchemy.types.STRINGTYPE
transform[f] = lambda x: x.to_json() if isinstance(x,DataFrame) else str(x)
sql_type[f] = 'VARCHAR'
else:
orig_fields = [i for i in orig_fields if i.lower()!=f]
delete_list.append(f)
df = df.drop(delete_list, axis=1)
for k, v in list(transform.items()):
if append_transformations:
df[k + "_" + f_d_type[k]] = df[k].apply(v)
sql_type[k + "_" + f_d_type[k]] = sql_type[k]
del df[k]
del sql_type[k]
else:
df[k] = df[k].apply(v)
conn = engine.connect()
if if_exists == 'append':
fields = [i.lower() for i in get_psql_table_fields(engine, schema, name)]
for f in list(set(df.columns.values) - set(fields)):
sql = "ALTER TABLE {}.{} ADD COLUMN {} {}".format(schema, name, f, sql_type[f])
conn.execute(sql)
df.to_sql(name, engine, schema=schema, index=False, dtype=d_types, if_exists=if_exists)
sql = 'ALTER TABLE {}."{}" OWNER to "{}"'.format(schema, name, owner)
conn.execute(sql)
conn.close()
engine.dispose()
df.columns = orig_fields
return df
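# Illustrative call (hypothetical dataframe and table name, not part of the pipeline):
#   df_to_psql(agents_df, engine, 'diffusion_shared', 'postgres', 'my_agent_table', if_exists='replace')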
#%%
def get_scenario_settings(schema, con):
"""
Creates dataframe of default scenario settings from input_main_scenario_options table
Parameters
----------
**schema** : 'SQL schema'
Schema in which to look for the scenario settings
**con** : 'SQL connection'
SQL connection to connect to database
Returns
-------
**df** : 'pd.df'
Dataframe of default scenario settings
"""
sql = "SELECT * FROM {}.input_main_scenario_options".format(schema)
df = pd.read_sql(sql, con)
return df
def get_userdefined_scenario_settings(schema, table_name, con):
"""
Creates dataframe of user created scenario settings
Parameters
----------
**schema** : 'SQL schema'
Schema in which to look for the scenario settings
**con** : 'SQL connection'
SQL connection to connect to database
Returns
-------
**df** : 'pd.df'
Dataframe of user created scenario settings
"""
sql = "SELECT * FROM {}.{}".format(schema, table_name)
df = pd.read_sql(sql, con)
return df
#%%
def import_table(scenario_settings, con, engine, role, input_name, csv_import_function=None):
"""
Imports table from csv given the name of the csv
Parameters
----------
**scenario_settings** : 'SQL schema'
Schema in which to look for the scenario settings
**con** : 'SQL connection'
SQL connection to connect to database
**engine** : 'SQL engine'
SQL engine to intepret SQL query
**role** : 'string'
Owner of schema
**input_name** : 'string'
Name of the csv file that should be imported
**csv_import_function** : 'function'
Specific function to import and munge csv
Returns
-------
**df** : 'pd.df'
Dataframe of the table that was imported
"""
schema = scenario_settings.schema
shared_schema = 'diffusion_shared'
input_data_dir = scenario_settings.input_data_dir
user_scenario_settings = get_scenario_settings(schema, con)
scenario_name = user_scenario_settings[input_name].values[0]
if scenario_name == 'User Defined':
userdefined_table_name = "input_" + input_name + "_user_defined"
scenario_userdefined_name = get_userdefined_scenario_settings(schema, userdefined_table_name, con)
scenario_userdefined_value = scenario_userdefined_name['val'].values[0]
if check_table_exists(shared_schema, scenario_userdefined_value, con):
sql = 'SELECT * FROM {}."{}"'.format(shared_schema, scenario_userdefined_value)
df = pd.read_sql(sql, con)
else:
df = pd.read_csv(os.path.join(input_data_dir, input_name, scenario_userdefined_value + '.csv'), index_col=False)
if csv_import_function is not None:
df = csv_import_function(df)
df_to_psql(df, engine, shared_schema, role, scenario_userdefined_value)
else:
if input_name == 'elec_prices':
df = datfunc.get_rate_escalations(con, scenario_settings.schema)
elif input_name == 'load_growth':
df = datfunc.get_load_growth(con, scenario_settings.schema)
elif input_name == 'pv_prices':
df = datfunc.get_technology_costs_solar(con, scenario_settings.schema)
return df
#%%
def stacked_sectors(df):
"""
Takes dataframe and sorts table fields by sector
Parameters
----------
**df** : 'pd.df'
Dataframe to be sorted by sector.
Returns
-------
**output** : 'pd.df'
Dataframe of the table that was imported and split by sector
"""
sectors = ['res', 'ind','com','nonres','all']
output = pd.DataFrame()
core_columns = [x for x in df.columns if x.split("_")[-1] not in sectors]
for sector in sectors:
if sector in set([i.split("_")[-1] for i in df.columns]):
sector_columns = [x for x in df.columns if x.split("_")[-1] == sector]
rename_fields = {k:"_".join(k.split("_")[0:-1]) for k in sector_columns}
temp = df.loc[:,core_columns + sector_columns]
temp = temp.rename(columns=rename_fields)
if sector =='nonres':
sector_list = ['com', 'ind']
elif sector=='all':
sector_list = ['com', 'ind','res']
else:
sector_list = [sector]
for s in sector_list:
temp['sector_abbr'] = s
output = pd.concat([output, temp], ignore_index=True, sort=False)
return output
#%%
def deprec_schedule(df):
"""
Takes depreciation schedule and sorts table fields by depreciation year
Parameters
----------
**df** : 'pd.df'
Dataframe to be sorted by sector.
Returns
-------
**output** : 'pd.df'
Dataframe of depreciation schedule sorted by year
"""
columns = ['1', '2', '3', '4', '5', '6']
df['deprec_sch']=df.apply(lambda x: [x.to_dict()[y] for y in columns], axis=1)
max_required_year = 2050
max_input_year = np.max(df['year'])
missing_years = np.arange(max_input_year + 1, max_required_year + 1, 1)
    last_entry = df[df['year'] == max_input_year].copy()
for year in missing_years:
last_entry['year'] = year
df = df.append(last_entry)
return df.loc[:,['year','sector_abbr','deprec_sch']]
#%%
def melt_year(parameter_name):
"""
Returns a function to melt dataframe's columns of years and parameter values to the row axis
Parameters
----------
**parameter name** : 'string'
Name of the parameter value in dataframe.
Returns
-------
**function** : 'function'
Function that melts years and parameter value to row axis
"""
def function(df):
"""
Unpivots years and values from columns of dataframe to rows for each state abbreviation
Parameters
----------
**df** : 'pd.df'
Dataframe to be unpivot.
Returns
-------
**df_tidy** : 'pd.df'
Dataframe with every other year and the parameter value for that year as rows for each state
"""
years = np.arange(2014, 2051, 2)
years = [str(year) for year in years]
df_tidy = pd.melt(df, id_vars='state_abbr', value_vars=years, var_name='year', value_name=parameter_name)
df_tidy['year'] = df_tidy['year'].astype(int)
return df_tidy
return function
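# e.g. melt_year('load_multiplier') returns a melter that turns a wide frame with one row per
# state_abbr and a column per even year 2014-2050 into a long frame with columns
# ['state_abbr', 'year', 'load_multiplier'] (illustrative parameter name)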
#%%
def import_agent_file(scenario_settings, con, cur, engine, model_settings, agent_file_status, input_name):
"""
Generates new agents or uses pre-generated agents from provided .pkl file
Parameters
----------
**scenario_settings** : 'SQL schema'
Schema of the scenario settings
**con** : 'SQL connection'
SQL connection to connect to database
**cur** : 'SQL cursor'
Cursor
**engine** : 'SQL engine'
SQL engine to intepret SQL query
**model_settings** : 'object'
Model settings that apply to all scenarios
**agent_file_status** : 'attribute'
Attribute that describes whether to use pre-generated agent file or create new
**input_name** : 'string'
.Pkl file name substring of pre-generated agent table
Returns
-------
**solar_agents** : 'Class'
Instance of Agents class with either user pre-generated or new data
"""
schema = scenario_settings.schema
input_agent_dir = model_settings.input_agent_dir
state_to_model = scenario_settings.state_to_model
ISO_List = ['ERCOT', 'NEISO', 'NYISO', 'CAISO', 'PJM', 'MISO', 'SPP']
if agent_file_status == 'Use pre-generated Agents':
userdefined_table_name = "input_" + input_name + "_user_defined"
scenario_userdefined_name = get_userdefined_scenario_settings(schema, userdefined_table_name, con)
scenario_userdefined_value = scenario_userdefined_name['val'].values[0]
solar_agents_df = pd.read_pickle(os.path.join(input_agent_dir, scenario_userdefined_value+".pkl"))
if scenario_settings.region in ISO_List:
solar_agents_df = pd.read_pickle(os.path.join(input_agent_dir, scenario_userdefined_value+".pkl"))
else:
solar_agents_df = solar_agents_df[solar_agents_df['state_abbr'].isin(state_to_model)]
if solar_agents_df.empty:
raise ValueError('Region not present within pre-generated agent file - Edit Inputsheet')
solar_agents = Agents(solar_agents_df)
solar_agents.on_frame(agent_mutation.elec.reassign_agent_tariffs, con)
else:
raise ValueError('Generating agents is not supported at this time. Please select "Use pre-generated Agents" in the input sheet')
return solar_agents
#%%
def process_elec_price_trajectories(elec_price_traj):
"""
Returns the trajectory of the change in electricity prices over time with 2018 as the base year
Parameters
----------
**elec_price_traj** : 'pd.df'
Dataframe of electricity prices by year and ReEDS BA
Returns
-------
**elec_price_change_traj** : 'pd.df'
Dataframe of annual electricity price change factors from base year
"""
county_to_ba_lkup = pd.read_csv('county_to_ba_mapping.csv')
# For SS19, when using Retail Electricity Prices from ReEDS
    base_year_prices = elec_price_traj[elec_price_traj['year']==2018].copy()
base_year_prices.rename(columns={'elec_price_res':'res_base',
'elec_price_com':'com_base',
'elec_price_ind':'ind_base'}, inplace=True)
elec_price_change_traj = pd.merge(elec_price_traj, base_year_prices[['res_base', 'com_base', 'ind_base', 'ba']], on='ba')
elec_price_change_traj['elec_price_change_res'] = elec_price_change_traj['elec_price_res'] / elec_price_change_traj['res_base']
elec_price_change_traj['elec_price_change_com'] = elec_price_change_traj['elec_price_com'] / elec_price_change_traj['com_base']
elec_price_change_traj['elec_price_change_ind'] = elec_price_change_traj['elec_price_ind'] / elec_price_change_traj['ind_base']
# Melt by sector
    res_df = elec_price_change_traj[['year', 'elec_price_change_res', 'ba']].copy()
    res_df.rename(columns={'elec_price_change_res':'elec_price_multiplier'}, inplace=True)
    res_df['sector_abbr'] = 'res'
    com_df = elec_price_change_traj[['year', 'elec_price_change_com', 'ba']].copy()
    com_df.rename(columns={'elec_price_change_com':'elec_price_multiplier'}, inplace=True)
    com_df['sector_abbr'] = 'com'
    ind_df = elec_price_change_traj[['year', 'elec_price_change_ind', 'ba']].copy()
    ind_df.rename(columns={'elec_price_change_ind':'elec_price_multiplier'}, inplace=True)
    ind_df['sector_abbr'] = 'ind'
elec_price_change_traj = pd.concat([res_df, com_df, ind_df], ignore_index=True, sort=False)
elec_price_change_traj = pd.merge(county_to_ba_lkup, elec_price_change_traj, how='left', on=['ba'])
elec_price_change_traj.drop(['ba'], axis=1, inplace=True)
return elec_price_change_traj
#%%
def process_wholesale_elec_prices(wholesale_elec_price_traj):
"""
Returns the trajectory of the change in wholesale electricity prices over time
Parameters
----------
**wholesale_elec_price_traj** : 'pd.df'
Dataframe of wholesale electricity prices by year and ReEDS BA
Returns
-------
**wholesale_elec_price_change_traj** : 'pd.df'
Dataframe of annual electricity price change factors from base year
"""
county_to_ba_lkup = | pd.read_csv('county_to_ba_mapping.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
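# Quick reference for the factorize contract exercised above (illustrative note, not a test):
# pd.factorize returns (codes, uniques) in order of first appearance unless sort=True, e.g.
#   codes, uniques = pd.factorize(['b', 'a', 'b'])
#   # codes -> array([0, 1, 0]), uniques -> array(['b', 'a'], dtype=object)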
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category')]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category')]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([pd.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([pd.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category'),
Index(case, dtype=object)]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category'),
Series(case, dtype=object)]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_unique_index(self):
cases = [Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
assert case.is_unique
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
@pytest.mark.parametrize('arr, unique', [
([(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
[(0, 0), (0, 1), (1, 0), (1, 1)]),
([('b', 'c'), ('a', 'b'), ('a', 'b'), ('b', 'c')],
[('b', 'c'), ('a', 'b')]),
([('a', 1), ('b', 2), ('a', 3), ('a', 1)],
[('a', 1), ('b', 2), ('a', 3)]),
])
def test_unique_tuples(self, arr, unique):
# https://github.com/pandas-dev/pandas/issues/16519
expected = np.empty(len(unique), dtype=object)
expected[:] = unique
result = pd.unique(arr)
tm.assert_numpy_array_equal(result, expected)
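# Quick reference for the duplicated() keep semantics exercised in this class (illustrative, not a test):
#   keep='first' -> later occurrences of a repeated value are marked True
#   keep='last'  -> earlier occurrences of a repeated value are marked True
#   keep=False   -> every value that has any duplicate is marked True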
class GroupVarTestMixin(object):
def test_group_var_generic_1d(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3, )).astype('int64')
expected_out = (np.squeeze(values)
.reshape((5, 3), order='F')
.std(axis=1, ddof=1) ** 2)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype='int64')
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype='int64')
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.vstack([values[:, 0]
.reshape(5, 2, order='F')
.std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5)]).T.astype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, check_less_precise=6)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float64
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float32
dtype = np.float32
rtol = 1e-2
class TestHashTable(object):
def test_lookup_nan(self):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
m = ht.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_lookup_overflow(self):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_get_unique(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
def test_vector_resize(self):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals, safely_resizes):
vals = np.array(np.random.randn(1000), dtype=dtype)
# get_labels may append to uniques
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array() set an external_view_exists flag on uniques.
tmp = uniques.to_array()
oldshape = tmp.shape
# subsequent get_labels() calls can no longer append to it
# (for all but StringHashTables + ObjectVector)
if safely_resizes:
htable.get_labels(vals, uniques, 0, -1)
else:
with pytest.raises(ValueError) as excinfo:
htable.get_labels(vals, uniques, 0, -1)
assert str(excinfo.value).startswith('external reference')
uniques.to_array() # should not raise here
assert tmp.shape == oldshape
test_cases = [
(ht.PyObjectHashTable, ht.ObjectVector, 'object', False),
(ht.StringHashTable, ht.ObjectVector, 'object', True),
(ht.Float64HashTable, ht.Float64Vector, 'float64', False),
(ht.Int64HashTable, ht.Int64Vector, 'int64', False),
(ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)]
for (tbl, vect, dtype, safely_resizes) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0, safely_resizes)
_test_vector_resize(tbl(), vect(), dtype, 10, safely_resizes)
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
a[np.random.choice(len(a), 10)] = -1
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
class TestRank(object):
@td.skip_if_no_scipy
def test_scipy_compat(self):
from scipy.stats import rankdata
def _check(arr):
mask = ~np.isfinite(arr)
arr = arr.copy()
result = libalgos.rank_1d_float64(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = nan
assert_almost_equal(result, exp)
_check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
_check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))
def test_basic(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in np.typecodes['AllInteger']:
s = Series([1, 100], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in [np.float64, np.uint64]:
s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
with tm.assert_raises_regex(TypeError, msg):
algos.rank(arr)
def test_pad_backfill_object_segfault():
old = np.array([], dtype='O')
new = np.array([datetime(2010, 12, 31)], dtype='O')
result = libalgos.pad_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.pad_object(new, old)
expected = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(new, old)
expected = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_arrmap():
values = np.array(['foo', 'foo', 'bar', 'bar', 'baz', 'qux'], dtype='O')
result = libalgos.arrmap_object(values, lambda x: x in ['foo', 'bar'])
assert (result.dtype == np.bool_)
class TestTseriesUtil(object):
def test_combineFunc(self):
pass
def test_reindex(self):
pass
def test_isna(self):
pass
def test_groupby(self):
pass
def test_groupby_withnull(self):
pass
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([0, 0, 1, 1, 1, 1,
2, 2, 2, 2, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([1, 4])
new = Index(lrange(5, 10))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_pad(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([5, 10])
new = Index(lrange(5))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='int64'),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13,
12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10,
9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7,
6, 5,
4, 3, 2, 1, 0], dtype='int64')]
assert (not libalgos.is_lexsorted(failure))
def test_groupsort_indexer():
a = np.random.randint(0, 1000, 100).astype(np.int64)
b = np.random.randint(0, 1000, 100).astype(np.int64)
result = libalgos.groupsort_indexer(a, 1000)[0]
# need to use a stable sort
# np.argsort returns int, groupsort_indexer
# always returns int64
expected = np.argsort(a, kind='mergesort')
expected = expected.astype(np.int64)
tm.assert_numpy_array_equal(result, expected)
# compare with lexsort
# np.lexsort returns int, groupsort_indexer
# always returns int64
key = a * 1000 + b
result = libalgos.groupsort_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
expected = expected.astype(np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_infinity_sort():
# GH 13445
# numpy's argsort can be unhappy if something is less than
# itself. Instead, let's give our infinities a self-consistent
# ordering, but outside the float extended real line.
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf]
assert all(Inf >= x for x in ref_nums)
assert all(Inf > x or x is Inf for x in ref_nums)
assert Inf >= Inf and Inf == Inf
assert not Inf < Inf and not Inf > Inf
assert libalgos.Infinity() == libalgos.Infinity()
assert not libalgos.Infinity() != libalgos.Infinity()
assert all(NegInf <= x for x in ref_nums)
assert all(NegInf < x or x is NegInf for x in ref_nums)
assert NegInf <= NegInf and NegInf == NegInf
assert not NegInf < NegInf and not NegInf > NegInf
assert libalgos.NegInfinity() == libalgos.NegInfinity()
assert not libalgos.NegInfinity() != libalgos.NegInfinity()
for perm in permutations(ref_nums):
assert sorted(perm) == ref_nums
# smoke tests
np.array([libalgos.Infinity()] * 32).argsort()
np.array([libalgos.NegInfinity()] * 32).argsort()
def test_infinity_against_nan():
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
assert not Inf > np.nan
assert not Inf >= np.nan
assert not Inf < np.nan
assert not Inf <= np.nan
assert not Inf == np.nan
assert Inf != np.nan
assert not NegInf > np.nan
assert not NegInf >= np.nan
assert not NegInf < np.nan
assert not NegInf <= np.nan
assert not NegInf == np.nan
assert NegInf != np.nan
def test_ensure_platform_int():
arr = np.arange(100, dtype=np.intp)
result = libalgos.ensure_platform_int(arr)
assert (result is arr)
def test_int64_add_overflow():
# see gh-14068
msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
n = np.iinfo(np.int64).min
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), m)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), n)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
with tm.assert_produces_warning(RuntimeWarning):
algos.checked_add_with_arr(np.array([m, m]),
np.array([np.nan, m]))
# Check that the nan boolean arrays override whether or not
# the addition overflows. We don't check the result but just
# the fact that an OverflowError is not raised.
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, False]),
b_mask=np.array([False, True]))
class TestMode(object):
def test_no_mode(self):
exp = Series([], dtype=np.float64)
tm.assert_series_equal(algos.mode([]), exp)
def test_mode_single(self):
# GH 15714
exp_single = [1]
data_single = [1]
exp_multi = [1]
data_multi = [1, 1]
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1], dtype=np.int)
tm.assert_series_equal(algos.mode([1]), exp)
exp = Series(['a', 'b', 'c'], dtype=np.object)
tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp)
def test_number_mode(self):
exp_single = [1]
data_single = [1] * 5 + [2] * 3
exp_multi = [1, 3]
data_multi = [1] * 5 + [2] * 3 + [3] * 5
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_strobj_mode(self):
exp = ['b']
data = ['a'] * 2 + ['b'] * 3
s = Series(data, dtype='c')
exp = Series(exp, dtype='c')
tm.assert_series_equal(algos.mode(s), exp)
exp = ['bar']
data = ['foo'] * 2 + ['bar'] * 3
for dt in [str, object]:
s = Series(data, dtype=dt)
exp = Series(exp, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_datelike_mode(self):
exp = Series(['1900-05-03', '2011-01-03',
'2013-01-02'], dtype="M8[ns]")
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]')
s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
'2011-01-03', '2013-01-02'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_timedelta_mode(self):
exp = Series(['-1 days', '0 days', '1 days'],
dtype='timedelta64[ns]')
s = Series(['1 days', '-1 days', '0 days'],
dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_mixed_dtype(self):
exp = Series(['foo'])
s = Series([1, 'foo', 'foo'])
tm.assert_series_equal(algos.mode(s), exp)
def test_uint64_overflow(self):
exp = Series([2**63], dtype=np.uint64)
s = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1, 2**63], dtype=np.uint64)
s = Series([1, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
def test_categorical(self):
c = Categorical([1, 2])
exp = c
        tm.assert_categorical_equal(algos.mode(c), exp)
import pickle
import pandas as pd
import numpy as np
from collections import deque
import copy
class Signal_Validation:
def __init__(self, net, fault_type=3, select_signal=0):
        # store the prediction network passed in by the caller
self.net = net
#
self.select_signal = select_signal
self.threshold = [0.011581499203325669, 0.009462367390260244, 0.008903015480589159, 0.009594334339569006,
0.031215020667924767, 0.010818916559719643, 0.01049919201077266, 0.01062811011351488,
0.010651478620771508, 0.011562519033165936, 0.035823854381993835, 0.039710045714257534,
0.033809111781334084, 0.04924519916104178, 0.04715594067619352, 0.042831757003614385,
0.008778805078996987, 0.014718878351330346, 0.02059897081470507, 0.027989265704257082,
0.0274660154968856, 0.025115614397052698, 0.03167101131485395, 0.02955934155605648,
0.06220589578881775, 0.05572199208638379]
self.in_col = ['BHSV', 'BHTV', 'BLSV', 'BLTV', 'BRHCV', 'BRHSV', 'KLAMPO241', 'KLAMPO242', 'KLAMPO243', 'UCOND',
'ULPHOUT', 'ZAFWTK', 'ZINST103', 'BFV122', 'BHV1', 'BHV2', 'BLV459', 'BLV616', 'BPV145',
'URHXUT', 'WBOAC', 'WDEWT', 'KLAMPO234', 'ZINST119', 'ZINST121', 'BHV108', 'BHV208', 'BHV308',
'BHV501', 'WFWLN1', 'WFWLN2', 'WFWLN3', 'ZINST100', 'ZINST101', 'ZINST85', 'ZINST86', 'ZINST87',
'ZINST99', 'BFV478', 'BFV479', 'BFV488', 'BFV498', 'BFV499', 'BHV502', 'KBCDO20', 'KBCDO21',
'KBCDO22', 'KLAMPO124', 'KLAMPO125', 'KLAMPO126', 'KSWO132', 'KSWO133', 'KSWO134', 'UAVLEG1',
'UAVLEG2', 'UAVLEG3', 'UCHGUT', 'UCOLEG1', 'UCOLEG2', 'UCOLEG3', 'UHOLEG1', 'UHOLEG2', 'UHOLEG3',
'UNRHXUT', 'UPRZ', 'UUPPPL', 'WCHGNO', 'WNETLD', 'ZINST36', 'ZINST63', 'ZINST65', 'ZINST67',
'ZINST68', 'ZINST69', 'ZINST79', 'ZINST80', 'ZINST81', 'ZPRZSP', 'ZPRZUN', 'ZINST70', 'ZINST72',
'ZINST73', 'ZINST75', 'ZINST76', 'ZINST77','ZINST78', 'KFAST', 'KBCDO10', 'KBCDO11', 'KBCDO3',
'KBCDO4', 'KBCDO5', 'KBCDO6', 'KBCDO7', 'KBCDO8', 'KBCDO9', 'KLAMPO21', 'KLAMPO22', 'ZINST1',
'CIODMPC', 'QPROLD', 'UAVLEGS', 'ZINST124', 'BFWMAI', 'BPV145I', 'CBRWIN', 'CIOD', 'DECH',
'DECH1', 'DENEUO', 'DNEUTR', 'EAFWTK', 'EHPHA', 'EHPHB', 'ELPHA', 'ELPHB', 'FCDP1', 'FCDP2',
'FCDP3', 'FCWP', 'FEICDP', 'FEIFWP', 'FFWP1', 'FFWP2', 'FFWP3', 'FNET', 'HHPTEX', 'HRHDT',
'HSDMP', 'HTIN', 'KBCDO23', 'KCONTS9', 'KEXCTB', 'KFWP1', 'KFWP2', 'KFWP3', 'KIRTBS',
'KLAMPO103', 'KLAMPO105', 'KLAMPO138', 'KLAMPO140', 'KLAMPO142', 'KLAMPO152', 'KLAMPO154',
'KLAMPO171', 'KLAMPO173', 'KLAMPO180', 'KLAMPO194', 'KLAMPO196', 'KLAMPO49', 'KLAMPO60',
'KLAMPO66', 'KLAMPO79', 'KLAMPO97', 'KMOSTP', 'KOILTM', 'KPERMS10', 'KPERMS13', 'KPERMS7',
'KPRTBS', 'KRCP1', 'KRCP2', 'KRCP3', 'KRILM', 'KTBREST', 'KTLC', 'KTSIS', 'KTSISC', 'KZBANK1',
'KZBANK2', 'KZBANK3', 'KZBANK4', 'KZBANK5', 'KZBANK6', 'KZBANK7', 'KZBANK8', 'KZROD1', 'KZROD10',
'KZROD11', 'KZROD12', 'KZROD13', 'KZROD14', 'KZROD15', 'KZROD16', 'KZROD17', 'KZROD18',
'KZROD19', 'KZROD2', 'KZROD20', 'KZROD21', 'KZROD22', 'KZROD23', 'KZROD24', 'KZROD25', 'KZROD26',
'KZROD27', 'KZROD28', 'KZROD29', 'KZROD3', 'KZROD30', 'KZROD31', 'KZROD32', 'KZROD33', 'KZROD34',
'KZROD35', 'KZROD36', 'KZROD37', 'KZROD38', 'KZROD39', 'KZROD4', 'KZROD40', 'KZROD41', 'KZROD42',
'KZROD43', 'KZROD44', 'KZROD45', 'KZROD46', 'KZROD47', 'KZROD48', 'KZROD49', 'KZROD5', 'KZROD50',
'KZROD51', 'KZROD52', 'KZROD6', 'KZROD7', 'KZROD8', 'KZROD9', 'PCDTB', 'PCNDS', 'PFWP',
'PFWPOUT', 'PHDTK', 'PHPTIN', 'PLETDB', 'PLETIN', 'PPRZ', 'PPRZCO', 'PPRZLP', 'PPRZN', 'PPRZW',
'PRHTR', 'PSG1', 'PSG2', 'PSG3', 'PSGLP', 'PSGWM', 'PTIN', 'PTINWM', 'PWRHFX', 'QLDSET', 'QLOAD',
'QLRSET', 'QNET', 'QPRONOR', 'QPROREL', 'QTHNOR', 'UCCWNR', 'UCCWSW', 'UCHGIN', 'UCNDS', 'UCOOL',
'UFDW', 'UHPHOA', 'UHPHOB', 'ULPHOA', 'ULPHOB', 'UMAXDT', 'UNORDT', 'URHDT', 'URHTR', 'UTIN',
'VNET', 'VRWST', 'WAUXSP', 'WBHFWP', 'WCDHTR', 'WCDPO', 'WCWAT', 'WDEMI', 'WFDW', 'WFWBYP1',
'WFWBYP2', 'WFWBYP3', 'WFWCNT1', 'WFWCNT2', 'WFWCNT3', 'WFWP', 'WFWPBY', 'WFWPOUT', 'WGSL',
'WHDTP', 'WHPDRNA', 'WHPDRNB', 'WHPDTA', 'WHPDTB', 'WHPHBYA', 'WHPHBYB', 'WHPHDT', 'WHPHINA',
'WHPHINB', 'WHPRH', 'WHPSRQA', 'WHPSRQB', 'WHPT', 'WHPTEX', 'WHPTEXA', 'WHPTEXB', 'WLPDRNA',
'WLPDRNB', 'WLPHBYA', 'WLPHBYB', 'WLPHCD', 'WLPHINA', 'WLPHINB', 'WLPHOUT', 'WLPT', 'WLPTC',
'WLPTEX', 'WLPTEXA', 'WLPTEXB', 'WLV616', 'WNETCH', 'WRHDRN', 'WRHDRNA', 'WRHDRNB', 'WRHDT',
'WRHFWP', 'WRHSTM', 'WSGRCP1', 'WSGRCP2', 'WSGRCP3', 'WSTM1', 'WSTM2', 'WSTM3', 'WTIN', 'XPEINP',
'XPEOUT', 'XSEINP', 'XSEOUT', 'XSMINJ', 'XSMOUT', 'YNET', 'ZBANK', 'ZHPHA', 'ZHPHB', 'ZINST10',
'ZINST104', 'ZINST106', 'ZINST109', 'ZINST110', 'ZINST114', 'ZINST124', 'ZINST126', 'ZINST37',
'ZINST38', 'ZINST39', 'ZINST40', 'ZINST41', 'ZINST43', 'ZINST44', 'ZINST46', 'ZINST49',
'ZINST50', 'ZINST51', 'ZINST52', 'ZINST53', 'ZINST54', 'ZINST57', 'ZINST58', 'ZINST59', 'ZINST6',
'ZINST60', 'ZINST61', 'ZINST62', 'ZINST64', 'ZINST82', 'ZINST83', 'ZINST84', 'ZINST91',
'ZINST92', 'ZINST94', 'ZINST95', 'ZINST96', 'ZLPHA', 'ZLPHB', 'ZPRZ', 'ZPRZNO', 'ZSGN1', 'ZSGN2',
'ZSGN3', 'ZSGNOR1', 'ZSGNOR2', 'ZSGNOR3', 'ZSGW1', 'ZSGW2', 'ZSGW3']
self.out_col = ['ZINST103', 'WFWLN1', 'WFWLN2', 'WFWLN3', 'ZINST100', 'ZINST101', 'ZINST85', 'ZINST86',
'ZINST87', 'ZINST99', 'UCHGUT', 'UCOLEG1', 'UCOLEG2', 'UCOLEG3', 'UPRZ', 'UUPPPL', 'WNETLD',
'ZINST63', 'ZINST65', 'ZINST79', 'ZINST80', 'ZINST81', 'ZINST70', 'ZINST72', 'ZINST73',
'ZINST75']
self.ylabel = ['FEEDWATER PUMP OUTLET PRESS', 'FEEDWATER LINE #1 FLOW (KG-SEC).',
'FEEDWATER LINE #2 FLOW (KG-SEC).', 'FEEDWATER LINE #3 FLOW (KG-SEC).',
'FEEDWATER TEMP', 'MAIN STEAM FLOW', 'STEAM LINE 3 FLOW', 'STEAM LINE 2 FLOW',
'STEAM LINE 1 FLOW', 'MAIN STEAM HEADER PRESSURE', 'CHARGING LINE OUTLET TEMPERATURE',
'LOOP #1 COLDLEG TEMPERATURE.', 'LOOP #2 COLDLEG TEMPERATURE.', 'LOOP #3 COLDLEG TEMPERATURE.',
'PRZ TEMPERATURE.', 'CORE OUTLET TEMPERATURE.', 'NET LETDOWN FLOW.', 'PRZ LEVEL',
'PRZ PRESSURE(WIDE RANGE)', 'LOOP 3 FLOW', 'LOOP 2 FLOW', 'LOOP 1 FLOW', 'SG 3 LEVEL(WIDE)',
'SG 1 LEVEL(WIDE)', 'SG 3 PRESSURE', 'SG 1 PRESSURE']
self.input_num = ['12', '29', '30', '31', '32', '33', '34', '35', '36', '37', '56', '57', '58', '59', '64',
'65', '67', '69', '70', '74', '75', '76', '79', '80', '81', '82']
self.output_num = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16',
'17', '18', '19', '20', '21', '22', '23', '24', '25']
self.High = ['100', '500', '500', '500', '500', '150', '500', '500', '500', '80', '300', '600', '600', '600',
'600', '600', '10', '80', '180', '150', '150', '150', '100', '100', '100', '100']
self.Low = ['', '', '', '', '0', '', '', '', '', '', '0', '0', '0', '0', '0', '0', '', '', '0', '', '', '', '0',
'0', '0', '0']
self.Current = ['', '', '', '', '0', '', '', '', '', '', '0', '0', '0', '0', '0', '0', '', '', '0', '', '', '',
'0', '0', '0', '0']
self.select_fault_type = fault_type # {0:high, 1:low, 2:current, 3:normal}
self.select_input_signal = int(self.input_num[self.select_signal]) # you can choice one signal
self.select_output_signal = int(self.output_num[self.select_signal]) # you can choice one signal
self.select_high = self.High[self.select_signal]
self.select_low = self.Low[self.select_signal]
self.select_current = self.Current[self.select_signal]
self.stack_data = deque(maxlen=2)
self.tick = 0
# minmax Load
with open('./AI/AI_SV_scaler_in.pkl', 'rb') as f:
self.scaler_in = pickle.load(f)
with open('./AI/AI_SV_scaler_out.pkl', 'rb') as f:
self.scaler_out = pickle.load(f)
def step(self, mem):
"""
        Take the CNS shared-memory values and return the values predicted by the network.
        :param mem: CNS shared-memory dict (each parameter entry provides a 'Val' field)
:return: {'Para': val, ... }
"""
get_db = self.scaler_in.transform([[mem[para]['Val'] for para in self.in_col]])
fin_out = self.scaler_out.inverse_transform(self.net.predict(get_db))[0]
fin_out_add_label = {key: fin_out[i] for key, i in zip(self.out_col, range(len(self.out_col)))}
return fin_out_add_label
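    # Hypothetical usage sketch (names of the model and memory dict are illustrative only):
    #   sv = Signal_Validation(net=trained_model, fault_type=3, select_signal=0)
    #   predicted = sv.step(mem)   # mem[para]['Val'] holds the current CNS value of each input
    #   error = abs(predicted['ZINST103'] - mem['ZINST103']['Val'])
    #   suspect = error > sv.threshold[0]   # out_col[0] is 'ZINST103', so threshold[0] applies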
def SV_result(self, db):
while True:
real_data = db[self.in_col]
self.stack_data.append(real_data)
if len(self.stack_data) < 2:
pass
else:
pd_real_data_1 = self.stack_data[0]
pd_real_data_2 = self.stack_data[1]
test_in_1 = pd_real_data_1
test_out_1 = test_in_1[self.out_col]
test_in = pd_real_data_2
# test_in = test_in.apply(creat_noise)
test_out = test_in[self.out_col]
# test_out = test_out.apply(creat_noise)
test_A_in = copy.deepcopy(test_in)
test_A_out = copy.deepcopy(test_out)
# print(test_in)
if self.tick >= 300:
if self.select_fault_type == 0:
test_in.iloc[:, self.select_input_signal] = int(self.select_high)
test_out.iloc[:, self.select_output_signal] = int(self.select_high)
elif self.select_fault_type == 1:
                        if self.select_low == '':
                            print('Low fault value is not defined for this signal')
else:
test_in.iloc[:, self.select_input_signal] = int(self.select_low)
test_out.iloc[:, self.select_output_signal] = int(self.select_low)
elif self.select_fault_type == 2:
                        if self.select_current == '':
                            print('Current fault value is not defined for this signal')
else:
test_in.iloc[:, self.select_input_signal] = test_in_1.iloc[:, self.select_input_signal]
test_out.iloc[:, self.select_output_signal] = test_out_1.iloc[:, self.select_output_signal]
elif self.select_fault_type == 3:
print('No fault signal')
test_in = pd.DataFrame(self.scaler_in.transform(test_in), columns=self.in_col, index=[self.tick])
test_out = pd.DataFrame(self.scaler_out.transform(test_out), columns=self.out_col, index=[self.tick])
                # predict with the network stored in __init__ (self.net), consistent with step()
                predictions_test = self.net.predict(test_in)
                p_test = pd.DataFrame(predictions_test, columns=self.out_col, index=[self.tick])
from Voicelab.VoicelabWizard.VoicelabDataModel import VoicelabDataModel
from Voicelab.pipeline.Pipeline import Pipeline
import Voicelab.toolkits.Voicelab as Voicelab
import copy
from Voicelab.VoicelabWizard.InputTab import InputTab
import parselmouth
from parselmouth.praat import call
from Voicelab.default_settings import visualize_list, function_requirements, display_whitelist
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
from datetime import datetime
from PyQt5.QtWidgets import QMessageBox
"""
# Voicelab Controller: coordinates the interaction between the presentation of the data and its storage
# The data controller does not need to know how the data gets in from the user, nor how the data
# is stored. This could in the future let us change the front end and backend more flexibly.
"""
class VoicelabController:
"""
# init: setup the base state of a controller, including a data model
"""
def __init__(self):
self.data_model = VoicelabDataModel()
self.active_settings_cache = {}
self.active_functions_cache = {}
self.last_used_settings = {}
self.progress = 0
self.progress_callback = lambda node, start, current, end: print(
node.node_id, start, current, end
)
self.figures = []
self.spectra = []
self.displayable_results = {}
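    # Minimal usage sketch (assumed workflow inferred from the methods below; the GUI tabs
    # normally drive these calls):
    #   controller = VoicelabController()
    #   controller.load_voices(["sample1.wav", "sample2.wav"])
    #   controller.activate_voices(["sample1.wav"])
    #   controller.start_processing(active_voices, active_functions, active_settings)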
"""
    # load_figure: load a single figure into the list of figures. This lets us keep track of the figures and
    # close them when they are no longer needed, which is especially important given matplotlib's statefulness.
"""
def load_figure(self, figure):
self.figures.append(figure)
"""
    # load_spectrum: load a single spectrum into the list of spectra. This lets us keep track of the spectra and
    # clean them up when they are no longer needed, which is especially important given matplotlib's statefulness.
"""
def load_spectrum(self, spectrum):
self.spectra.append(spectrum)
"""
# reset_figures: clear all of the figures that we have saved and empty the figures variable
"""
def reset_figures(self):
for figure in self.figures:
figure.clear()
plt.close(figure)
self.figures = []
"""
# load voices: from a list of file paths, create voice objects and save them in the model
"""
def load_voices(self, file_paths):
for file_path in file_paths:
self.data_model.load_voice(parselmouth.Sound(file_path), file_path)
return self.data_model.loaded_voices
"""
# unload voices: from a list of file paths, remove the associated voice file from the model
"""
def unload_voices(self, file_paths):
for file_path in file_paths:
self.data_model.unload_voice(file_path)
return self.data_model.loaded_voices
"""
# activate_voices: from a list of file paths, set the associated voice files for processing
"""
def activate_voices(self, file_paths):
self.data_model.activate_voices(file_paths)
print(self.data_model.active_voices)
return self.data_model.active_voices
"""
# deactivate voices: from a list of file paths, remove the associated files from processing
"""
def deactivate_voices(self, file_paths):
for file_path in file_paths:
self.data_model.deactivate_voice(file_path)
return self.data_model.active_voices
"""
# load function: load a single function into the data model
"""
def load_function(self, fn_name, fn_node, default=False) -> object:
self.data_model.load_function(fn_name, fn_node, default)
return self.data_model.loaded_functions
"""
# activate function: set a single function to run during processing
# TODO: this behaviour could probably be handled only by the controller rather than by the model
"""
def activate_function(self, fn_name):
self.data_model.activate_function(fn_name)
return self.data_model.active_functions
"""
# deactivate function: set a single function to not run during processing
# todo: this behaviour could probably be handled only by the controller rather than by the model
"""
def deactivate_function(self, fn_name):
self.data_model.deactivate_function(fn_name)
return self.data_model.active_functions
"""
# set setting: set a value for a given setting
"""
def set_settings(self, fn, settings, values):
for i, setting in enumerate(settings):
self.data_model.set_setting(fn, settings[i], values[i])
return self.data_model.active_settings
"""
# activate setting: indicate that a setting is non-default
# TODO: this behaviour could probably be handled only by the controller rather than by the model
"""
def activate_settings(self, settings):
for i, setting in enumerate(settings):
self.data_model.activate_setting(setting)
return self.data_model.active_settings
"""
    # reset setting: take a setting id and reset it to its default value
"""
def reset_setting(self, fn_name, setting_name):
self.data_model.reset_setting(fn_name, setting_name)
"""
# save_state: caches the setting values so that they can be retrieved later. Used currently for
# toggling between default and non-default settings
"""
def save_state(self):
self.active_settings_cache = copy.copy(self.active_settings)
self.active_functions_cache = copy.copy(self.active_functions)
"""
# load state : swap whatever is currently loaded with what is cached.
"""
def load_state(self):
self.data_model.swap_active_settings(self.active_settings_cache)
self.data_model.swap_active_functions(self.active_functions_cache)
"""
# reset active settings : swap all active settings with the default values. convenience function
# so that we don't have to loop through all of the settings each time we want to do this action
"""
def reset_active_settings(self):
self.data_model.swap_active_settings(self.default_settings)
"""
    # reset active functions : swap all active functions with the default functions. Convenience function
    # so that we don't have to loop through all of the functions each time we want to do this action.
"""
def reset_active_functions(self):
self.data_model.swap_active_functions(self.default_functions)
"""
    # reset results : Empty the results in preparation for another run
"""
def reset_results(self):
self.data_model.reset_results()
"""
# start processing: Start processing the loaded voice files using the set of active functions
# and active settings.
"""
def start_processing(self, active_voices, active_functions, active_settings):
        # we want to deep copy the active settings, otherwise they may be unintentionally
        # modified with values during processing
# self.last_used_settings = copy.deepcopy(active_settings)
# DRF - I guess we don't really need to do that after all since it's commented out
# save the settings so we can put them in the excel file later
self.last_used_settings = active_settings
# reset the results in case this isn't our first run since the program opened
self.reset_results()
# Create an empty WARIO pipeline
pipeline = Pipeline()
# Create a node that will load all of the voices
# todo figure out why we need to do this, since we already loaded the voices
load_voices = Voicelab.LoadVoicesNode("Load Voice")
# Set up the load node with the appropriate file locations
load_voices.args["file_locations"] = active_voices
# Add the node to the pipeline
pipeline.add(load_voices)
        # We want to specially configure the visualize voice node because we will be attaching things to it later
if "Create Spectrograms" in active_functions:
# Create a node that will draw the default spectrogram for the loaded voices, we always want to plot the spectrogram
visualize_voices = Voicelab.VisualizeVoiceNode("Create Spectrograms")
# if there are settings the user has configured, we want to attach them to the node
visualize_voices.args = active_settings["Create Spectrograms"]
# visualize_voices.args[value] = self.model['settings']['Visualize Voice']['value'][value]
# Connect the loaded voice to the visualize node so it has access to it
pipeline.connect((load_voices, "voice"), (visualize_voices, "voice"))
# Add the node to the pipeline
pipeline.add(visualize_voices)
# todo Fix this
if "Visualize Spectrum" in active_functions:
# Create a node that will draw the default spectrogram for the loaded voices, we always want to plot the spectrogram
visualize_spectrum = Voicelab.VisualizeSpectrumNode("Visualize Spectrum")
# if there are settings the user has configured, we want to attach them to the node
visualize_spectrum.args = active_settings["Visualize Spectrum"]
# Connect the loaded voice to the visualize node so it has access to it
pipeline.connect((load_voices, "voice"), (visualize_spectrum, "voice"))
# Add the node to the pipeline
pipeline.add(visualize_spectrum)
# For each checked operation we create the appropriate node, assign its associated
# parameters, and add it to the pipeline connecting it to the load voice node and
# visualize node. those two functions are always performed
for fn in active_functions:
# Visualize is handled outside of this
# if fn != "Create Spectrograms":
active_functions[fn].args = active_settings[fn]
pipeline.add(active_functions[fn])
pipeline.connect(
(load_voices, "voice"), (active_functions[fn], "voice")
)
pipeline.connect(
(load_voices, "file_path"), (active_functions[fn], "file_path")
)
# if "Create Spectrograms" in active_functions and fn in visualize_list:
# pipeline.connect(
# (active_functions[fn], visualize_list[fn]),
# (visualize_voices, visualize_list[fn]),
# )
# Some nodes may require specific values from upstream nodes (as specified in the default settings file)
        # Resolve these dependencies and create the relevant connections
for fn_name in function_requirements:
if fn_name in active_functions:
child_node = active_functions[fn_name]
                # function requirements are defined as a tuple of parent_name followed by the name of the shared argument
for parent_name, argument in function_requirements[fn_name]:
parent_node = active_functions[parent_name]
pipeline.connect((parent_node, argument), (child_node, argument))
pipeline.listen(self.progress_callback)
pipeline_results = pipeline.start()
finished_window = QMessageBox()
finished_window.setWindowTitle("Finished")
finished_window.setText("Finished processing.\nCheck your data, then save.")
finished_window.setIcon(QMessageBox.Information)
finished_window.exec_()
# Collect the results of the pipeline running
for i, result_file in enumerate(pipeline_results):
for result_fn in pipeline_results[i]:
if result_fn.node_id == "Create Spectrograms":
# "figure" is the maptlotlib figure returned from VisualizeVoiceNode.py
# it is a dictionary key, the dictionary value is the actual figure `fig` from fig = plt.figure()
figure: object = pipeline_results[i][result_fn]["figure"]
self.load_figure(figure)
elif result_fn.node_id == "Visualize Spectrum":
# "spectrum" is the matplotlib figure from VisualizeSpectrum.py
# it is a dictionary key, the dictionary value is the actual figure `fig` from fig = plt.figure()
spectrum: object = pipeline_results[i][result_fn]["spectrum"]
self.load_spectrum(spectrum)
self.data_model.load_result(
active_voices[i], result_fn.node_id, pipeline_results[i][result_fn]
)
for arg_setting in result_fn.args:
self.data_model.set_computed_setting(result_fn.node_id, arg_setting, result_fn.args[arg_setting])
return self.data_model.active_results
"""
# save_results: save the results of processing to the files system
"""
def save_results(
self, active_results, active_functions, last_used_settings, save_location
):
append_file_to_results = False
append_file_to_settings = False
if save_location != "":
# Prepare the data for saving as an excel workbook
results_sheets = {
fn_name: {"Input File": []} for fn_name in active_functions
}
settings_sheets = {
fn_name: {"Input File": []} for fn_name in active_functions
}
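            # Both dicts map each function name to {column header: list of per-file values};
            # every sub-dict becomes one sheet of the workbook, e.g. (with hypothetical names)
            # {"Some Function": {"Input File": ["sound1"], "some measure": ["42"]}}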
# Create a new sheet for each function, and fill with the results for each file
for i, file_path in enumerate(active_results):
file_name = file_path.split("/")[-1].split(".wav")[0]
for fn_name in active_results[file_path]:
if fn_name != "Load Voice":
# We want to exclude saving the unmodified voice
for result_name in active_results[file_path][fn_name]:
result_value = active_results[file_path][fn_name][
result_name
]
header_value = ""
output_value = ""
if isinstance(result_value, np.generic):
result_value = result_value.item()
# if the result is a modified sound file, we want to save that as a wav file
if isinstance(result_value, parselmouth.Sound):
voice_name = result_value.name
modified_path = (
save_location + "/" + voice_name + ".wav"
)
self.save_voice(result_value, modified_path)
header_value = result_name + " Output File"
output_value = voice_name
                            # if the result is some sort of matplotlib figure, we want to save it as a png
# assign filenames based on results dictionary key
elif isinstance(result_value, Figure):
spectrogram_path = ''.join([save_location, "/", file_name, "_spectrogram.png"])
spectrum_path = ''.join([save_location, "/", file_name, "_spectrum.png"])
if result_name == "figure":
self.save_spectrogram(result_value, spectrogram_path)
output_value = spectrogram_path
elif result_name == "spectrum":
self.save_spectrum(result_value, spectrum_path)
                                    output_value = spectrum_path
header_value = "Output File"
# if the result is any other type that we know how to save, save it as part of the work book
elif type(result_value) in display_whitelist:
header_value = result_name
output_value = result_value
                            # create a column in the sheet for this type of result if it hasn't already been created
if header_value not in results_sheets[fn_name]:
results_sheets[fn_name][header_value] = []
# append the result to this column
results_sheets[fn_name][header_value].append(
str(output_value)
)
append_file_to_results = True
# fill the settings sheets with values
for j, param_name in enumerate(last_used_settings[fn_name]):
if param_name not in settings_sheets[fn_name]:
settings_sheets[fn_name][param_name] = []
# if there are options, save the first one
if isinstance(
last_used_settings[fn_name][param_name], tuple
):
param_value = last_used_settings[fn_name][param_name][0]
else:
param_value = last_used_settings[fn_name][param_name]
settings_sheets[fn_name][param_name].append(
str(param_value)
)
# Check to see if we have already written the file name to the sheet
results_sheets[fn_name]["Input File"].insert(0, file_name)
settings_sheets[fn_name]["Input File"].insert(0, file_name)
            results_writer = ExcelWriter(save_location + "/voicelab_results.xlsx")
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
from pandas.errors import ParserWarning
import pandas._libs.lib as lib
from pandas.core.dtypes.common import is_list_like
from collections import OrderedDict
from modin.db_conn import ModinDatabaseConnection, UnsupportedDatabaseException
from modin.config import TestDatasetSize, Engine, StorageFormat, IsExperimental
from modin.utils import to_pandas
from modin.pandas.utils import from_arrow
import pyarrow as pa
import os
import sys
import shutil
import sqlalchemy as sa
import csv
import tempfile
from .utils import (
check_file_leaks,
df_equals,
json_short_string,
json_short_bytes,
json_long_string,
json_long_bytes,
get_unique_filename,
io_ops_bad_exc,
eval_io_from_str,
dummy_decorator,
create_test_dfs,
COMP_TO_EXT,
teardown_test_file,
teardown_test_files,
generate_dataframe,
)
if StorageFormat.get() == "Omnisci":
from modin.experimental.core.execution.native.implementations.omnisci_on_native.test.utils import (
eval_io,
align_datetime_dtypes,
)
else:
from .utils import eval_io
if StorageFormat.get() == "Pandas":
import modin.pandas as pd
else:
import modin.experimental.pandas as pd
try:
import ray
EXCEPTIONS = (ray.exceptions.WorkerCrashedError,)
except ImportError:
EXCEPTIONS = ()
from modin.config import NPartitions
NPartitions.put(4)
DATASET_SIZE_DICT = {
"Small": 64,
"Normal": 2000,
"Big": 20000,
}
# Number of rows in the test file
NROWS = DATASET_SIZE_DICT.get(TestDatasetSize.get(), DATASET_SIZE_DICT["Small"])
TEST_DATA = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
def assert_files_eq(path1, path2):
with open(path1, "rb") as file1, open(path2, "rb") as file2:
file1_content = file1.read()
file2_content = file2.read()
        return file1_content == file2_content
def setup_clipboard(row_size=NROWS):
df = pandas.DataFrame({"col1": np.arange(row_size), "col2": np.arange(row_size)})
df.to_clipboard()
def parquet_eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs):
"""
Helper function to test `to_parquet` method.
Parameters
----------
modin_obj : pd.DataFrame
A Modin DataFrame or a Series to test `to_parquet` method.
pandas_obj: pandas.DataFrame
A pandas DataFrame or a Series to test `to_parquet` method.
fn : str
Name of the method, that should be tested.
extension : str
Extension of the test file.
"""
unique_filename_modin = get_unique_filename(extension=extension)
unique_filename_pandas = get_unique_filename(extension=extension)
try:
getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs)
getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs)
pandas_df = pandas.read_parquet(unique_filename_pandas)
modin_df = pd.read_parquet(unique_filename_modin)
df_equals(pandas_df, modin_df)
finally:
teardown_test_file(unique_filename_pandas)
try:
teardown_test_file(unique_filename_modin)
except IsADirectoryError:
shutil.rmtree(unique_filename_modin)
def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs):
"""Helper function to test `to_<extension>` methods.
Args:
modin_obj: Modin DataFrame or Series to test `to_<extension>` method.
pandas_obj: Pandas DataFrame or Series to test `to_<extension>` method.
fn: name of the method, that should be tested.
extension: Extension of the test file.
"""
unique_filename_modin = get_unique_filename(extension=extension)
unique_filename_pandas = get_unique_filename(extension=extension)
try:
# parameter `max_retries=0` is set for `to_csv` function on Ray engine,
# in order to increase the stability of tests, we repeat the call of
# the entire function manually
last_exception = None
for _ in range(3):
try:
getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs)
except EXCEPTIONS as exc:
last_exception = exc
continue
break
else:
raise last_exception
getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs)
assert assert_files_eq(unique_filename_modin, unique_filename_pandas)
finally:
teardown_test_files([unique_filename_modin, unique_filename_pandas])
@pytest.mark.usefixtures("TestReadCSVFixture")
@pytest.mark.skipif(
IsExperimental.get() and StorageFormat.get() == "Pyarrow",
reason="Segmentation fault; see PR #2347 ffor details",
)
class TestCsv:
# delimiter tests
@pytest.mark.parametrize("sep", [None, "_", ",", ".", "\n"])
@pytest.mark.parametrize("delimiter", ["_", ",", ".", "\n"])
@pytest.mark.parametrize("decimal", [".", "_"])
@pytest.mark.parametrize("thousands", [None, ",", "_", " "])
def test_read_csv_delimiters(
self, make_csv_file, sep, delimiter, decimal, thousands
):
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename,
delimiter=delimiter,
thousands_separator=thousands,
decimal_separator=decimal,
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
delimiter=delimiter,
sep=sep,
decimal=decimal,
thousands=thousands,
)
# Column and Index Locations and Names tests
@pytest.mark.parametrize("header", ["infer", None, 0])
@pytest.mark.parametrize("index_col", [None, "col1"])
@pytest.mark.parametrize("prefix", [None, "_", "col"])
@pytest.mark.parametrize(
"names", [lib.no_default, ["col1"], ["c1", "c2", "c3", "c4", "c5", "c6", "c7"]]
)
@pytest.mark.parametrize(
"usecols", [None, ["col1"], ["col1", "col2", "col6"], [0, 1, 5]]
)
@pytest.mark.parametrize("skip_blank_lines", [True, False])
def test_read_csv_col_handling(
self,
header,
index_col,
prefix,
names,
usecols,
skip_blank_lines,
):
if names is lib.no_default:
pytest.skip("some parameters combiantions fails: issue #2312")
if header in ["infer", None] and names is not lib.no_default:
pytest.skip(
"Heterogeneous data in a column is not cast to a common type: issue #3346"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_blank_lines"],
header=header,
index_col=index_col,
prefix=prefix,
names=names,
usecols=usecols,
skip_blank_lines=skip_blank_lines,
)
@pytest.mark.parametrize("usecols", [lambda col_name: col_name in ["a", "b", "e"]])
def test_from_csv_with_callable_usecols(self, usecols):
fname = "modin/pandas/test/data/test_usecols.csv"
pandas_df = pandas.read_csv(fname, usecols=usecols)
modin_df = pd.read_csv(fname, usecols=usecols)
df_equals(modin_df, pandas_df)
# General Parsing Configuration
@pytest.mark.parametrize("dtype", [None, True])
@pytest.mark.parametrize("engine", [None, "python", "c"])
@pytest.mark.parametrize(
"converters",
[
None,
{
"col1": lambda x: np.int64(x) * 10,
"col2": pandas.to_datetime,
"col4": lambda x: x.replace(":", ";"),
},
],
)
@pytest.mark.parametrize("skipfooter", [0, 10])
def test_read_csv_parsing_1(
self,
dtype,
engine,
converters,
skipfooter,
):
if dtype:
dtype = {
col: "object"
for col in pandas.read_csv(
pytest.csvs_names["test_read_csv_regular"], nrows=1
).columns
}
eval_io(
fn_name="read_csv",
check_exception_type=None, # issue #2320
raising_exceptions=None,
check_kwargs_callable=not callable(converters),
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
dtype=dtype,
engine=engine,
converters=converters,
skipfooter=skipfooter,
)
@pytest.mark.parametrize("header", ["infer", None, 0])
@pytest.mark.parametrize(
"skiprows",
[
2,
lambda x: x % 2,
lambda x: x > 25,
lambda x: x > 128,
np.arange(10, 50),
np.arange(10, 50, 2),
],
)
@pytest.mark.parametrize("nrows", [35, None])
@pytest.mark.parametrize(
"names",
[
[f"c{col_number}" for col_number in range(4)],
[f"c{col_number}" for col_number in range(6)],
None,
],
)
@pytest.mark.parametrize("encoding", ["latin1", "windows-1251", None])
def test_read_csv_parsing_2(
self,
make_csv_file,
request,
header,
skiprows,
nrows,
names,
encoding,
):
xfail_case = (
StorageFormat.get() == "Omnisci"
and header is not None
and isinstance(skiprows, int)
and names is None
and nrows is None
)
if xfail_case:
pytest.xfail(
"read_csv fails because of duplicated columns names - issue #3080"
)
if request.config.getoption(
"--simulate-cloud"
).lower() != "off" and is_list_like(skiprows):
pytest.xfail(
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340"
)
if encoding:
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename,
encoding=encoding,
)
kwargs = {
"filepath_or_buffer": unique_filename
if encoding
else pytest.csvs_names["test_read_csv_regular"],
"header": header,
"skiprows": skiprows,
"nrows": nrows,
"names": names,
"encoding": encoding,
}
if Engine.get() != "Python":
df = pandas.read_csv(**dict(kwargs, nrows=1))
# in that case first partition will contain str
if df[df.columns[0]][df.index[0]] in ["c1", "col1", "c3", "col3"]:
pytest.xfail("read_csv incorrect output with float data - issue #2634")
eval_io(
fn_name="read_csv",
check_exception_type=None, # issue #2320
raising_exceptions=None,
check_kwargs_callable=not callable(skiprows),
# read_csv kwargs
**kwargs,
)
@pytest.mark.parametrize("true_values", [["Yes"], ["Yes", "true"], None])
@pytest.mark.parametrize("false_values", [["No"], ["No", "false"], None])
@pytest.mark.parametrize("skipfooter", [0, 10])
@pytest.mark.parametrize("nrows", [35, None])
def test_read_csv_parsing_3(
self,
true_values,
false_values,
skipfooter,
nrows,
):
xfail_case = (
(false_values or true_values)
and Engine.get() != "Python"
and StorageFormat.get() != "Omnisci"
)
if xfail_case:
pytest.xfail("modin and pandas dataframes differs - issue #2446")
eval_io(
fn_name="read_csv",
check_exception_type=None, # issue #2320
raising_exceptions=None,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_yes_no"],
true_values=true_values,
false_values=false_values,
skipfooter=skipfooter,
nrows=nrows,
)
def test_read_csv_skipinitialspace(self):
unique_filename = get_unique_filename()
str_initial_spaces = (
"col1,col2,col3,col4\n"
"five, six, seven, eight\n"
" five, six, seven, eight\n"
"five, six, seven, eight\n"
)
eval_io_from_str(str_initial_spaces, unique_filename, skipinitialspace=True)
@pytest.mark.parametrize(
"test_case",
["single_element", "single_column", "multiple_columns"],
)
def test_read_csv_squeeze(self, request, test_case):
if request.config.getoption("--simulate-cloud").lower() != "off":
pytest.xfail(
reason="Error EOFError: stream has been closed in `modin in the cloud` mode - issue #3329"
)
unique_filename = get_unique_filename()
str_single_element = "1"
str_single_col = "1\n2\n3\n"
str_four_cols = "1, 2, 3, 4\n5, 6, 7, 8\n9, 10, 11, 12\n"
case_to_data = {
"single_element": str_single_element,
"single_column": str_single_col,
"multiple_columns": str_four_cols,
}
eval_io_from_str(case_to_data[test_case], unique_filename, squeeze=True)
eval_io_from_str(
case_to_data[test_case], unique_filename, header=None, squeeze=True
)
def test_read_csv_mangle_dupe_cols(self):
if StorageFormat.get() == "Omnisci":
pytest.xfail(
"processing of duplicated columns in OmniSci storage format is not supported yet - issue #3080"
)
unique_filename = get_unique_filename()
str_non_unique_cols = "col,col,col,col\n5, 6, 7, 8\n9, 10, 11, 12\n"
eval_io_from_str(str_non_unique_cols, unique_filename, mangle_dupe_cols=True)
# NA and Missing Data Handling tests
@pytest.mark.parametrize("na_values", ["custom_nan", "73"])
@pytest.mark.parametrize("keep_default_na", [True, False])
@pytest.mark.parametrize("na_filter", [True, False])
@pytest.mark.parametrize("verbose", [True, False])
@pytest.mark.parametrize("skip_blank_lines", [True, False])
def test_read_csv_nans_handling(
self,
na_values,
keep_default_na,
na_filter,
verbose,
skip_blank_lines,
):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_nans"],
na_values=na_values,
keep_default_na=keep_default_na,
na_filter=na_filter,
verbose=verbose,
skip_blank_lines=skip_blank_lines,
)
# Datetime Handling tests
@pytest.mark.parametrize(
"parse_dates", [True, False, ["col2"], ["col2", "col4"], [1, 3]]
)
@pytest.mark.parametrize("infer_datetime_format", [True, False])
@pytest.mark.parametrize("keep_date_col", [True, False])
@pytest.mark.parametrize(
"date_parser", [None, lambda x: pandas.datetime.strptime(x, "%Y-%m-%d")]
)
@pytest.mark.parametrize("dayfirst", [True, False])
@pytest.mark.parametrize("cache_dates", [True, False])
def test_read_csv_datetime(
self,
parse_dates,
infer_datetime_format,
keep_date_col,
date_parser,
dayfirst,
cache_dates,
):
if (
StorageFormat.get() == "Omnisci"
and isinstance(parse_dates, list)
and ("col4" in parse_dates or 3 in parse_dates)
):
pytest.xfail(
"In some cases read_csv with `parse_dates` with OmniSci storage format outputs incorrect result - issue #3081"
)
raising_exceptions = io_ops_bad_exc # default value
if isinstance(parse_dates, dict) and callable(date_parser):
# In this case raised TypeError: <lambda>() takes 1 positional argument but 2 were given
raising_exceptions = list(io_ops_bad_exc)
raising_exceptions.remove(TypeError)
eval_io(
fn_name="read_csv",
check_kwargs_callable=not callable(date_parser),
raising_exceptions=raising_exceptions,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
parse_dates=parse_dates,
infer_datetime_format=infer_datetime_format,
keep_date_col=keep_date_col,
date_parser=date_parser,
dayfirst=dayfirst,
cache_dates=cache_dates,
)
# Iteration tests
@pytest.mark.parametrize("iterator", [True, False])
def test_read_csv_iteration(self, iterator):
filename = pytest.csvs_names["test_read_csv_regular"]
# Tests __next__ and correctness of reader as an iterator
# Use larger chunksize to read through file quicker
rdf_reader = pd.read_csv(filename, chunksize=500, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=500, iterator=iterator)
for modin_df, pd_df in zip(rdf_reader, pd_reader):
df_equals(modin_df, pd_df)
# Tests that get_chunk works correctly
rdf_reader = pd.read_csv(filename, chunksize=1, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=1, iterator=iterator)
modin_df = rdf_reader.get_chunk(1)
pd_df = pd_reader.get_chunk(1)
df_equals(modin_df, pd_df)
# Tests that read works correctly
rdf_reader = pd.read_csv(filename, chunksize=1, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=1, iterator=iterator)
modin_df = rdf_reader.read()
pd_df = pd_reader.read()
df_equals(modin_df, pd_df)
def test_read_csv_encoding_976(self):
file_name = "modin/pandas/test/data/issue_976.csv"
names = [str(i) for i in range(11)]
kwargs = {
"sep": ";",
"names": names,
"encoding": "windows-1251",
}
df1 = pd.read_csv(file_name, **kwargs)
df2 = pandas.read_csv(file_name, **kwargs)
# these columns contain data of various types in partitions
# see #1931 for details;
df1 = df1.drop(["4", "5"], axis=1)
df2 = df2.drop(["4", "5"], axis=1)
df_equals(df1, df2)
# Quoting, Compression parameters tests
@pytest.mark.parametrize("compression", ["infer", "gzip", "bz2", "xz", "zip"])
@pytest.mark.parametrize("encoding", [None, "latin8", "utf16"])
@pytest.mark.parametrize("engine", [None, "python", "c"])
def test_read_csv_compression(self, make_csv_file, compression, encoding, engine):
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename, encoding=encoding, compression=compression
)
compressed_file_path = (
f"{unique_filename}.{COMP_TO_EXT[compression]}"
if compression != "infer"
else unique_filename
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=compressed_file_path,
compression=compression,
encoding=encoding,
engine=engine,
)
@pytest.mark.parametrize(
"encoding",
[
None,
"ISO-8859-1",
"latin1",
"iso-8859-1",
"cp1252",
"utf8",
pytest.param(
"unicode_escape",
marks=pytest.mark.skip(
condition=sys.version_info < (3, 9),
reason="https://bugs.python.org/issue45461",
),
),
"raw_unicode_escape",
"utf_16_le",
"utf_16_be",
"utf32",
"utf_32_le",
"utf_32_be",
"utf-8-sig",
],
)
def test_read_csv_encoding(self, make_csv_file, encoding):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, encoding=encoding)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
encoding=encoding,
)
@pytest.mark.parametrize("thousands", [None, ",", "_", " "])
@pytest.mark.parametrize("decimal", [".", "_"])
@pytest.mark.parametrize("lineterminator", [None, "x", "\n"])
@pytest.mark.parametrize("escapechar", [None, "d", "x"])
@pytest.mark.parametrize("dialect", ["test_csv_dialect", None])
def test_read_csv_file_format(
self,
make_csv_file,
thousands,
decimal,
lineterminator,
escapechar,
dialect,
):
if Engine.get() != "Python" and lineterminator == "x":
pytest.xfail("read_csv with Ray engine outputs empty frame - issue #2493")
elif Engine.get() != "Python" and escapechar:
pytest.xfail(
"read_csv with Ray engine fails with some 'escapechar' parameters - issue #2494"
)
elif Engine.get() != "Python" and dialect:
pytest.xfail(
"read_csv with Ray engine fails with `dialect` parameter - issue #2508"
)
unique_filename = get_unique_filename()
if dialect:
test_csv_dialect_params = {
"delimiter": "_",
"doublequote": False,
"escapechar": "\\",
"quotechar": "d",
"quoting": csv.QUOTE_ALL,
}
csv.register_dialect(dialect, **test_csv_dialect_params)
dialect = csv.get_dialect(dialect)
make_csv_file(filename=unique_filename, **test_csv_dialect_params)
else:
make_csv_file(
filename=unique_filename,
thousands_separator=thousands,
decimal_separator=decimal,
escapechar=escapechar,
line_terminator=lineterminator,
)
eval_io(
check_exception_type=None, # issue #2320
raising_exceptions=None,
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
thousands=thousands,
decimal=decimal,
lineterminator=lineterminator,
escapechar=escapechar,
dialect=dialect,
)
@pytest.mark.parametrize(
"quoting",
[csv.QUOTE_ALL, csv.QUOTE_MINIMAL, csv.QUOTE_NONNUMERIC, csv.QUOTE_NONE],
)
@pytest.mark.parametrize("quotechar", ['"', "_", "d"])
@pytest.mark.parametrize("doublequote", [True, False])
@pytest.mark.parametrize("comment", [None, "#", "x"])
def test_read_csv_quoting(
self,
make_csv_file,
quoting,
quotechar,
doublequote,
comment,
):
        # in these cases escapechar should be set, otherwise an error occurs:
        # _csv.Error: need to escape, but no escapechar set
use_escapechar = (
not doublequote and quotechar != '"' and quoting != csv.QUOTE_NONE
)
escapechar = "\\" if use_escapechar else None
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename,
quoting=quoting,
quotechar=quotechar,
doublequote=doublequote,
escapechar=escapechar,
comment_col_char=comment,
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
quoting=quoting,
quotechar=quotechar,
doublequote=doublequote,
escapechar=escapechar,
comment=comment,
)
# Error Handling parameters tests
@pytest.mark.parametrize("warn_bad_lines", [True, False, None])
@pytest.mark.parametrize("error_bad_lines", [True, False, None])
@pytest.mark.parametrize("on_bad_lines", ["error", "warn", "skip", None])
def test_read_csv_error_handling(
self,
warn_bad_lines,
error_bad_lines,
on_bad_lines,
):
# in that case exceptions are raised both by Modin and pandas
# and tests pass
raise_exception_case = on_bad_lines is not None and (
error_bad_lines is not None or warn_bad_lines is not None
)
if (
not raise_exception_case
and Engine.get() not in ["Python", "Cloudpython"]
and StorageFormat.get() != "Omnisci"
):
pytest.xfail("read_csv doesn't raise `bad lines` exceptions - issue #2500")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_bad_lines"],
warn_bad_lines=warn_bad_lines,
error_bad_lines=error_bad_lines,
on_bad_lines=on_bad_lines,
)
# Internal parameters tests
@pytest.mark.parametrize("use_str_data", [True, False])
@pytest.mark.parametrize("engine", [None, "python", "c"])
@pytest.mark.parametrize("delimiter", [",", " "])
@pytest.mark.parametrize("delim_whitespace", [True, False])
@pytest.mark.parametrize("low_memory", [True, False])
@pytest.mark.parametrize("memory_map", [True, False])
@pytest.mark.parametrize("float_precision", [None, "high", "round_trip"])
def test_read_csv_internal(
self,
make_csv_file,
use_str_data,
engine,
delimiter,
delim_whitespace,
low_memory,
memory_map,
float_precision,
):
        # In this case a TypeError is raised: cannot use a string pattern on a bytes-like object,
        # so TypeError should be excluded from the raising_exceptions list in order to check that
        # the same exceptions are raised by pandas and Modin
case_with_TypeError_exc = (
engine == "python"
and delimiter == ","
and delim_whitespace
and low_memory
and memory_map
and float_precision is None
)
raising_exceptions = io_ops_bad_exc # default value
if case_with_TypeError_exc:
raising_exceptions = list(io_ops_bad_exc)
raising_exceptions.remove(TypeError)
kwargs = {
"engine": engine,
"delimiter": delimiter,
"delim_whitespace": delim_whitespace,
"low_memory": low_memory,
"memory_map": memory_map,
"float_precision": float_precision,
}
unique_filename = get_unique_filename()
if use_str_data:
str_delim_whitespaces = (
"col1 col2 col3 col4\n5 6 7 8\n9 10 11 12\n"
)
eval_io_from_str(
str_delim_whitespaces,
unique_filename,
raising_exceptions=raising_exceptions,
**kwargs,
)
else:
make_csv_file(
filename=unique_filename,
delimiter=delimiter,
)
eval_io(
filepath_or_buffer=unique_filename,
fn_name="read_csv",
raising_exceptions=raising_exceptions,
**kwargs,
)
# Issue related, specific or corner cases
@pytest.mark.parametrize("nrows", [2, None])
def test_read_csv_bad_quotes(self, nrows):
csv_bad_quotes = (
'1, 2, 3, 4\none, two, three, four\nfive, "six", seven, "eight\n'
)
unique_filename = get_unique_filename()
eval_io_from_str(csv_bad_quotes, unique_filename, nrows=nrows)
def test_read_csv_categories(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/test_categories.csv",
names=["one", "two"],
dtype={"one": "int64", "two": "category"},
)
@pytest.mark.parametrize("encoding", [None, "utf-8"])
@pytest.mark.parametrize("encoding_errors", ["strict", "ignore"])
@pytest.mark.parametrize("parse_dates", [False, ["timestamp"]])
@pytest.mark.parametrize("index_col", [None, 0, 2])
@pytest.mark.parametrize("header", ["infer", 0])
@pytest.mark.parametrize(
"names",
[
None,
["timestamp", "symbol", "high", "low", "open", "close", "spread", "volume"],
],
)
def test_read_csv_parse_dates(
self, names, header, index_col, parse_dates, encoding, encoding_errors
):
if names is not None and header == "infer":
pytest.xfail(
"read_csv with Ray engine works incorrectly with date data and names parameter provided - issue #2509"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/test_time_parsing.csv",
names=names,
header=header,
index_col=index_col,
parse_dates=parse_dates,
encoding=encoding,
encoding_errors=encoding_errors,
)
@pytest.mark.parametrize(
"storage_options",
[{"anon": False}, {"anon": True}, {"key": "123", "secret": "123"}, None],
)
def test_read_csv_s3(self, storage_options):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="s3://noaa-ghcn-pds/csv/1788.csv",
storage_options=storage_options,
)
@pytest.mark.parametrize("names", [list("XYZ"), None])
@pytest.mark.parametrize("skiprows", [1, 2, 3, 4, None])
def test_read_csv_skiprows_names(self, names, skiprows):
if StorageFormat.get() == "Omnisci" and names is None and skiprows in [1, None]:
            # If these conditions are satisfied, column names will be inferred
            # from the first row, which will contain duplicated values; that is
            # not supported by the `Omnisci` storage format yet.
pytest.xfail(
"processing of duplicated columns in OmniSci storage format is not supported yet - issue #3080"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/issue_2239.csv",
names=names,
skiprows=skiprows,
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_read_csv_default_to_pandas(self):
with pytest.warns(UserWarning):
# This tests that we default to pandas on a buffer
from io import StringIO
pd.read_csv(
StringIO(open(pytest.csvs_names["test_read_csv_regular"], "r").read())
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_read_csv_default_to_pandas_url(self):
# We haven't implemented read_csv from https, but if it's implemented, then this needs to change
eval_io(
fn_name="read_csv",
modin_warning=UserWarning,
# read_csv kwargs
filepath_or_buffer="https://raw.githubusercontent.com/modin-project/modin/master/modin/pandas/test/data/blah.csv",
# It takes about ~17Gb of RAM for Omnisci to import the whole table from this test
# because of too many (~1000) string columns in it. Taking a subset of columns
# to be able to run this test on low-RAM machines.
usecols=[0, 1, 2, 3] if StorageFormat.get() == "Omnisci" else None,
)
@pytest.mark.parametrize("nrows", [21, 5, None])
@pytest.mark.parametrize("skiprows", [4, 1, 500, None])
def test_read_csv_newlines_in_quotes(self, nrows, skiprows):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/newlines.csv",
nrows=nrows,
skiprows=skiprows,
cast_to_str=StorageFormat.get() != "Omnisci",
)
def test_read_csv_sep_none(self):
eval_io(
fn_name="read_csv",
modin_warning=ParserWarning,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
sep=None,
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_read_csv_incorrect_data(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/test_categories.json",
)
@pytest.mark.parametrize(
"kwargs",
[
{"names": [5, 1, 3, 4, 2, 6]},
{"names": [0]},
{"names": None, "usecols": [1, 0, 2]},
{"names": [3, 1, 2, 5], "usecols": [4, 1, 3, 2]},
],
)
def test_read_csv_names_neq_num_cols(self, kwargs):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/issue_2074.csv",
**kwargs,
)
def test_read_csv_wrong_path(self):
raising_exceptions = [e for e in io_ops_bad_exc if e != FileNotFoundError]
eval_io(
fn_name="read_csv",
raising_exceptions=raising_exceptions,
# read_csv kwargs
filepath_or_buffer="/some/wrong/path.csv",
)
@pytest.mark.skipif(
StorageFormat.get() == "Omnisci",
reason="to_csv is not implemented with OmniSci storage format yet - issue #3082",
)
@pytest.mark.parametrize("header", [False, True])
@pytest.mark.parametrize("mode", ["w", "wb+"])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_to_csv(self, header, mode):
pandas_df = generate_dataframe()
modin_df = pd.DataFrame(pandas_df)
eval_to_file(
modin_obj=modin_df,
pandas_obj=pandas_df,
fn="to_csv",
extension="csv",
header=header,
mode=mode,
)
@pytest.mark.skipif(
StorageFormat.get() == "Omnisci",
reason="to_csv is not implemented with OmniSci storage format yet - issue #3082",
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_dataframe_to_csv(self):
pandas_df = pandas.read_csv(pytest.csvs_names["test_read_csv_regular"])
modin_df = pd.DataFrame(pandas_df)
eval_to_file(
modin_obj=modin_df, pandas_obj=pandas_df, fn="to_csv", extension="csv"
)
@pytest.mark.skipif(
StorageFormat.get() == "Omnisci",
reason="to_csv is not implemented with OmniSci storage format yet - issue #3082",
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_series_to_csv(self):
pandas_s = pandas.read_csv(
pytest.csvs_names["test_read_csv_regular"], usecols=["col1"]
).squeeze()
modin_s = pd.Series(pandas_s)
eval_to_file(
modin_obj=modin_s, pandas_obj=pandas_s, fn="to_csv", extension="csv"
)
def test_read_csv_within_decorator(self):
@dummy_decorator()
def wrapped_read_csv(file, method):
if method == "pandas":
return pandas.read_csv(file)
if method == "modin":
return pd.read_csv(file)
pandas_df = wrapped_read_csv(
pytest.csvs_names["test_read_csv_regular"], method="pandas"
)
modin_df = wrapped_read_csv(
pytest.csvs_names["test_read_csv_regular"], method="modin"
)
if StorageFormat.get() == "Omnisci":
# Aligning DateTime dtypes because of the bug related to the `parse_dates` parameter:
# https://github.com/modin-project/modin/issues/3485
modin_df, pandas_df = align_datetime_dtypes(modin_df, pandas_df)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize(
"read_mode",
[
"r",
pytest.param(
"rb",
marks=pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="Cannot pickle file handles. See comments in PR #2625",
),
),
],
)
def test_read_csv_file_handle(self, read_mode, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename)
with open(unique_filename, mode=read_mode) as buffer:
df_pandas = pandas.read_csv(buffer)
buffer.seek(0)
df_modin = pd.read_csv(buffer)
df_equals(df_modin, df_pandas)
def test_unnamed_index(self):
def get_internal_df(df):
            partition = df._query_compiler._modin_frame._partitions[0][0]
return partition.to_pandas()
path = "modin/pandas/test/data/issue_3119.csv"
read_df = pd.read_csv(path, index_col=0)
assert get_internal_df(read_df).index.name is None
read_df = pd.read_csv(path, index_col=[0, 1])
for name1, name2 in zip(get_internal_df(read_df).index.names, [None, "a"]):
assert name1 == name2
def test_read_csv_empty_frame(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
usecols=["col1"],
index_col="col1",
)
@pytest.mark.parametrize(
"skiprows",
[
lambda x: x > 20,
lambda x: True,
lambda x: x in [10, 20],
pytest.param(
lambda x: x << 10,
marks=pytest.mark.skipif(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
),
),
],
)
def test_read_csv_skiprows_corner_cases(self, skiprows):
eval_io(
fn_name="read_csv",
check_kwargs_callable=not callable(skiprows),
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
skiprows=skiprows,
)
class TestTable:
def test_read_table(self, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, delimiter="\t")
eval_io(
fn_name="read_table",
# read_table kwargs
filepath_or_buffer=unique_filename,
)
def test_read_table_within_decorator(self, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, delimiter="\t")
@dummy_decorator()
def wrapped_read_table(file, method):
if method == "pandas":
return pandas.read_table(file)
if method == "modin":
return pd.read_table(file)
pandas_df = wrapped_read_table(unique_filename, method="pandas")
modin_df = wrapped_read_table(unique_filename, method="modin")
df_equals(modin_df, pandas_df)
def test_read_table_empty_frame(self, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, delimiter="\t")
eval_io(
fn_name="read_table",
# read_table kwargs
filepath_or_buffer=unique_filename,
usecols=["col1"],
index_col="col1",
)
class TestParquet:
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet(self, make_parquet_file, columns):
unique_filename = get_unique_filename(extension="parquet")
make_parquet_file(filename=unique_filename)
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path=unique_filename,
columns=columns,
)
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
    def test_read_parquet_directory(self, make_parquet_file, columns):
unique_filename = get_unique_filename(extension=None)
make_parquet_file(filename=unique_filename, directory=True)
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path=unique_filename,
columns=columns,
)
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_partitioned_directory(self, make_parquet_file, columns):
unique_filename = get_unique_filename(extension=None)
make_parquet_file(filename=unique_filename, partitioned_columns=["col1"])
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path=unique_filename,
columns=columns,
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_pandas_index(self):
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
unique_filename = get_unique_filename(extension="parquet")
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 100_000, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
try:
pandas_df.set_index("idx").to_parquet(unique_filename)
# read the same parquet using modin.pandas
df_equals(
pd.read_parquet(unique_filename), pandas.read_parquet(unique_filename)
)
pandas_df.set_index(["idx", "A"]).to_parquet(unique_filename)
df_equals(
pd.read_parquet(unique_filename), pandas.read_parquet(unique_filename)
)
finally:
os.remove(unique_filename)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_pandas_index_partitioned(self):
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
unique_filename = get_unique_filename(extension="parquet")
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 10, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
try:
pandas_df.set_index("idx").to_parquet(unique_filename, partition_cols=["A"])
# read the same parquet using modin.pandas
df_equals(
pd.read_parquet(unique_filename), pandas.read_parquet(unique_filename)
)
finally:
shutil.rmtree(unique_filename)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_hdfs(self):
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path="modin/pandas/test/data/hdfs.parquet",
)
@pytest.mark.parametrize("path_type", ["url", "object"])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_s3(self, path_type):
dataset_url = "s3://modin-datasets/testing/test_data.parquet"
if path_type == "object":
import s3fs
fs = s3fs.S3FileSystem(anon=True)
with fs.open(dataset_url, "rb") as file_obj:
eval_io("read_parquet", path=file_obj)
else:
eval_io("read_parquet", path=dataset_url, storage_options={"anon": True})
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_without_metadata(self):
"""Test that Modin can read parquet files not written by pandas."""
from pyarrow import csv
from pyarrow import parquet
parquet_fname = get_unique_filename(extension="parquet")
csv_fname = get_unique_filename(extension="parquet")
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 10, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
try:
pandas_df.to_csv(csv_fname, index=False)
# read into pyarrow table and write it to a parquet file
t = csv.read_csv(csv_fname)
parquet.write_table(t, parquet_fname)
df_equals(
pd.read_parquet(parquet_fname), pandas.read_parquet(parquet_fname)
)
finally:
teardown_test_files([parquet_fname, csv_fname])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_to_parquet(self):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
parquet_eval_to_file(
modin_obj=modin_df,
pandas_obj=pandas_df,
fn="to_parquet",
extension="parquet",
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_2462(self):
test_df = pandas.DataFrame({"col1": [["ad_1", "ad_2"], ["ad_3"]]})
with tempfile.TemporaryDirectory() as directory:
path = f"{directory}/data"
os.makedirs(path)
test_df.to_parquet(path + "/part-00000.parquet")
read_df = pd.read_parquet(path)
df_equals(test_df, read_df)
class TestJson:
@pytest.mark.parametrize("lines", [False, True])
def test_read_json(self, make_json_file, lines):
eval_io(
fn_name="read_json",
# read_json kwargs
path_or_buf=make_json_file(lines=lines),
lines=lines,
)
@pytest.mark.parametrize(
"storage_options",
[{"anon": False}, {"anon": True}, {"key": "123", "secret": "123"}, None],
)
def test_read_json_s3(self, storage_options):
eval_io(
fn_name="read_json",
path_or_buf="s3://modin-datasets/testing/test_data.json",
lines=True,
orient="records",
storage_options=storage_options,
)
def test_read_json_categories(self):
eval_io(
fn_name="read_json",
# read_json kwargs
path_or_buf="modin/pandas/test/data/test_categories.json",
dtype={"one": "int64", "two": "category"},
)
@pytest.mark.parametrize(
"data",
[json_short_string, json_short_bytes, json_long_string, json_long_bytes],
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_json_string_bytes(self, data):
with pytest.warns(UserWarning):
modin_df = pd.read_json(data)
# For I/O objects we need to rewind to reuse the same object.
if hasattr(data, "seek"):
data.seek(0)
df_equals(modin_df, pandas.read_json(data))
def test_to_json(self):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
eval_to_file(
modin_obj=modin_df, pandas_obj=pandas_df, fn="to_json", extension="json"
)
@pytest.mark.parametrize(
"read_mode",
[
"r",
pytest.param(
"rb",
marks=pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="Cannot pickle file handles. See comments in PR #2625",
),
),
],
)
def test_read_json_file_handle(self, make_json_file, read_mode):
with open(make_json_file(), mode=read_mode) as buf:
df_pandas = pandas.read_json(buf)
buf.seek(0)
df_modin = pd.read_json(buf)
df_equals(df_pandas, df_modin)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_json_metadata(self, make_json_file):
# `lines=True` is for triggering Modin implementation,
# `orient="records"` should be set if `lines=True`
df = pd.read_json(
make_json_file(ncols=80, lines=True), lines=True, orient="records"
)
parts_width_cached = df._query_compiler._modin_frame._column_widths_cache
num_splits = len(df._query_compiler._modin_frame._partitions[0])
parts_width_actual = [
len(df._query_compiler._modin_frame._partitions[0][i].get().columns)
for i in range(num_splits)
]
assert parts_width_cached == parts_width_actual
class TestExcel:
@check_file_leaks
def test_read_excel(self, make_excel_file):
eval_io(
fn_name="read_excel",
# read_excel kwargs
io=make_excel_file(),
)
@check_file_leaks
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_excel_engine(self, make_excel_file):
eval_io(
fn_name="read_excel",
modin_warning=UserWarning,
# read_excel kwargs
io=make_excel_file(),
engine="openpyxl",
)
@check_file_leaks
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_excel_index_col(self, make_excel_file):
eval_io(
fn_name="read_excel",
modin_warning=UserWarning,
# read_excel kwargs
io=make_excel_file(),
index_col=0,
)
@check_file_leaks
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_excel_all_sheets(self, make_excel_file):
unique_filename = make_excel_file()
        pandas_df = pandas.read_excel(unique_filename, sheet_name=None)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 18 13:15:21 2020
@author: jm
"""
#%% required libraries
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#%% read data
#df_original = pd.read_csv('https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv?cachebust=5805f0ab2859cf87', encoding = 'utf-8')
df_original = pd.read_csv('data/google_mobility_report_2020-07-25.csv', encoding = 'utf-8')
df = df_original.copy()
# check columns
df.columns
# see head of data frame
df.head()
#%% filter data for Argentina only
df = df[df['country_region'] == 'Argentina']
# check resulting data
df.info()
# check NA
df.isna().any()
df.isna().sum().plot(kind = 'bar')
# drop columns with many NA
df = df.drop(columns = ['country_region_code', 'sub_region_2', 'iso_3166_2_code', 'census_fips_code'])
# rename columns
df.rename(columns = {'country_region': 'pais',
'sub_region_1': 'provincia',
'date': 'fecha',
'retail_and_recreation_percent_change_from_baseline': 'retail_and_recreation',
'grocery_and_pharmacy_percent_change_from_baseline': 'grocery_and_pharmacy',
'parks_percent_change_from_baseline': 'parks',
'transit_stations_percent_change_from_baseline': 'transit_stations',
'workplaces_percent_change_from_baseline': 'workplaces',
'residential_percent_change_from_baseline': 'residential'},
inplace = True)
# drop row where 'provincia' is NA
df = df.dropna(subset = ['provincia'])
# check NA
df.isna().sum().plot(kind = 'bar')
#%% set index to plot the data
df['fecha'] = pd.to_datetime(df['fecha'])
df.set_index('fecha', inplace = True)
# check index
print(df.index)
#%% subsets
bsas = df[df['provincia'] == 'Buenos Aires Province']
caba = df[df['provincia'] == 'Buenos Aires']
#%% plot for CABA
plt.rcParams["figure.dpi"] = 1200
plt.figure(figsize = (10, 10))
fig, ax = plt.subplots()
# plot data
ax.plot(caba.index, caba['workplaces'], color = 'darkred', label = 'Workplaces')
ax.plot(caba.index, caba['retail_and_recreation'], color = 'darkblue', label = 'Retail and recreation')
# color the area of lockdown phase 1
p1 = caba['2020-07-01':'2020-07-17'].index
ax.fill_between(p1, -90, -30, facecolor = 'lightsteelblue', alpha = 0.3, label = 'Fase 1')
# annotate carnaval
ax.annotate('Carnaval', xy = [pd.Timestamp('2020-02-24'), -71],
xytext = [pd.Timestamp('2020-03-25'), 10],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate dia del trabajador
ax.annotate('Día del \ntrabajador', xy = [pd.Timestamp('2020-05-01'), -87],
xytext = [pd.Timestamp('2020-03-28'), -50],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate dia de la Revolucion de Mayo
ax.annotate('Día de la \nRevolución de Mayo', xy = [pd.Timestamp('2020-05-25'), -84],
xytext = [pd.Timestamp('2020-04-01'), -30],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate paso a la inmortalidad Gral. Güemes
ax.annotate('Paso a la inmortalidad \nGral. Güemes', xy = [pd.Timestamp('2020-06-15'), -80],
xytext = [pd.Timestamp('2020-04-15'), -15],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate paso a la inmortalidad Gral. Belgrano
ax.annotate('Paso a la \ninmortalidad \nGral. Belgrano', xy = [pd.Timestamp('2020-06-20'), -55],
xytext = [pd.Timestamp('2020-05-23'), -28],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate dia de la independencia
ax.annotate('Día de la \nindependencia', xy = [pd.Timestamp('2020-07-09'), -80],
xytext = [pd.Timestamp('2020-06-15'), -10],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# set axis names
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval = 7))
plt.xticks(fontsize = 5, rotation = 90)
ax.set_xlabel('Fecha', size = 8)
ax.set_ylabel('% de cambio respecto a la línea base', size = 8)
ax.set_title('Ciudad Autónoma de Buenos Aires')
# set caption
caption = "@canovasjm \nFuente: Google LLC 'Google COVID-19 Community Mobility Reports' \nhttps://www.google.com/covid19/mobility/ \nConsultado: 2020-07-25"
plt.figtext(0.9, -0.05, caption, wrap = False, horizontalalignment = 'right', fontsize = 6)
# set legend
plt.legend(prop = {'size': 6})
# show plot
plt.show()
# save plot
fig.set_size_inches([10, 7])
fig.savefig('figures/caba-fase-uno.png', dpi = fig.dpi, bbox_inches = 'tight')
#%% plot for Buenos Aires
plt.rcParams["figure.dpi"] = 1200
plt.figure(figsize = (10, 10))
fig, ax = plt.subplots()
# plot data
ax.plot(bsas.index, bsas['workplaces'], color = 'darkred', label = 'Workplaces')
ax.plot(bsas.index, bsas['retail_and_recreation'], color = 'darkblue', label = 'Retail and recreation')
# color the area of lockdown phase 1
p1 = bsas['2020-07-01':'2020-07-17'].index
ax.fill_between(p1, -85, -22, facecolor = 'lightsteelblue', alpha = 0.3, label = 'Fase 1')
# annotate carnaval
ax.annotate('Carnaval', xy = [pd.Timestamp('2020-02-24'), -54],
xytext = [pd.Timestamp('2020-03-25'), 10],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate dia del trabajador
ax.annotate('Día del \ntrabajador', xy = [pd.Timestamp('2020-05-01'), -76],
xytext = [pd.Timestamp('2020-03-28'), -47],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate dia de la Revolucion de Mayo
ax.annotate('Día de la \nRevolución de Mayo', xy = [pd.Timestamp('2020-05-25')
# This tests a specific case of optimisation from the old encoder, WriteBufferingIterator.
#
# This pre-initialised the rows buffer with missing values. As such, for the first row of
# data, if these contained missing values, it did not differ from the previous row.
#
# If the first values in the row to be encoded (i.e. the slowest varying column) contained
# a missing value, then according to this metric, it did not need to be encoded, and the
# starting row marker would be non-zero.
#
# We need to ensure that these get correctly decoded!
# See ODB-533
import os
import numpy
import pandas
import pytest
from conftest import odc_modules
@pytest.mark.parametrize("odyssey", odc_modules)
def test_initial_missing1(odyssey):
check_data = {
"stringval": ["", "testing"],
"intval": [None, 12345678],
"realval": [None, 1234.56],
"doubleval": [None, 9876.54],
}
check_df = pandas.DataFrame(check_data)
data_file = os.path.join(os.path.dirname(__file__), "data/odb_533_1.odb")
df = odyssey.read_odb(data_file, single=True)
print(check_df)
print(df)
assert set(check_df.columns) == set(df.columns)
for col in df.columns:
numpy.testing.assert_array_equal(df[col], check_df[col])
@pytest.mark.parametrize("odyssey", odc_modules)
def test_initial_missing2(odyssey):
check_data = {
"stringval": ["", "testing"],
"intval": [None, 12345678],
"realval": [None, 1234.56],
"doubleval": [None, 9876.54],
"changing": [1234, 5678],
}
    check_df = pandas.DataFrame(check_data)
"""
This script trains DRA on the Huys task.
"""
import itertools
import numpy as np
import pandas as pd
from time import time
from task import HuysTask
env = HuysTask(depth=3)
n_trials = int(6e4)
# Hyper params
beta = 10
N_traj = 10
nRestarts = 5
# initializing params
lmda= 1
a1 = 0.1 # learning rate for q
a2 = 0.1 # learning rate for u
y = 1 # gamma
e = 0.1 # epsilon for e-greedy
# Defining environment variables
t = env.transitions
T = env.transition_matrix
R = env.reward_matrix
t1 = np.reshape(np.repeat(np.arange(env.depth*6,(env.depth+1)*6),2),(6,2))
# Creating terminal states to add to the list of transitions
t = np.vstack((t,t1,t1)) # Adding terminal states to list of transitions
tlist = list(np.mod(t,6)+1) # Human-readable list of states (idxed 1-6 as in Huys et al.)
sTerminal = 6*env.depth # All states >= sTerminal are Terminal states
nTerminal = 12 # No. of terminal states x 2 (x2 is a silly hack for lack of motivation)
def softmax(x,beta):
b = np.max(beta*x) # This is a trick to avoid overflow errors
y = np.exp(beta*x - b) # during numerical calculations
return y/y.sum()
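# Illustrative note (not part of the original script): subtracting the max is
# the standard log-sum-exp trick and does not change the result, e.g.
# softmax(np.array([0., 0.]), beta=1) returns [0.5, 0.5] and always sums to 1.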
def policy(q,sigma,s,t,beta):
# Soft thompson sampling: softmax applied instead of max
ind_s = (t[:,0]==s) # index for transition list where state is s
zeta_s = np.random.randn(len(q[ind_s]))
p = softmax(q[ind_s] + zeta_s*sigma[ind_s],beta=beta) # choice probability vector
a = np.random.choice(np.arange(len(p)), p=p)
return a, ind_s, zeta_s, p
# Defining the cost function
def kl_mvn(m0, S0, m1, S1):
"""
Kullback-Liebler divergence from Gaussian pm,pv to Gaussian qm,qv.
Diagonal covariances are assumed. Divergence is expressed in nats.
"""
# store inv diag covariance of S1 and diff between means
N = m0.shape[0]
iS1 = np.linalg.inv(S1)
diff = m1 - m0
# kl is made of three terms
tr_term = np.trace(iS1 @ S0)
#det_term = np.log(np.linalg.det(S1)/np.linalg.det(S0))
det_term = np.trace(np.ma.log(S1)) - np.trace(np.ma.log(S0))
quad_term = diff.T @ np.linalg.inv(S1) @ diff
return .5 * (tr_term + det_term + quad_term - N)
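# Minimal sanity check for kl_mvn (illustrative addition, not part of the
# original experiment): the divergence of a diagonal Gaussian from itself is 0.
_m_chk, _S_chk = np.zeros(2), np.eye(2)
assert abs(kl_mvn(_m_chk, _S_chk, _m_chk, _S_chk)) < 1e-9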
df_allRuns = [] # List to collect results (q,sigma) from all runs
expR_allRuns= [] # List to collect expected rewards from all runs
cost_allRuns= [] # List to collect cost from all runs
obj_allRuns = [] # List to collect objective from all runs
for run in range(nRestarts):
expR = [] # List to collect expected rewards(t)
cost = [] # List to collect cost(t)
objective = [] # List to collect objective(t)
# Initializing Gaussian approximated q-distribution
sigma0 = 100 # Initial sigma for Q ~ N(q,sigma)
sigmaT = 1 # sigma for terminal states (defines min possible sigma)
q = np.zeros(len(t))
sigma = sigma0*np.ones(len(q))
sigma[-nTerminal:] = sigmaT # sigma for terminal states
q[(env.depth-1)*12:env.depth*12] = env.rewards[(env.depth-1)*12:env.depth*12] # Last depth level memories known
sigma[(env.depth-1)*12:env.depth*12] = sigmaT # depth 3 memories known
start = time()
for ii in range(n_trials):
if np.mod(ii,1000)==0:
print(f"Run {run+1}/{nRestarts}, Trial {ii}/{n_trials}")
# beta = np.clip(beta + 0.1, a_min=1, a_max=10)
# N_traj= int(beta) + 1
s0 = np.random.randint(6) # starting state
s = s0
while s < (sTerminal - nTerminal/2):
a, i_s, z,p = policy(q, sigma, s, t, beta) # Take action acc. to policy
s1 = t[i_s][a][1] # (s'|s,a)
r = R[s,s1] # r(s,a,s')
i_sa = i_s * (t[:,1]==s1) # idx for [s,a] pair
_,_,_,p1 = policy(q, sigma, s1, t, beta) # computing pi(a'|s')
q[i_sa] = q[i_sa] + a1*(r + y*np.dot(p1,q[t[:,0]==s1]) - q[i_sa]) # on-policy update
# q[idx] = q[idx] + a1*(r + y*max(q[t[:,0]==s1]) - q[idx]) # q-learning update
s = s1
# Update sigma at the end of each trial by stochastic gradient descent:
grads = []
for i in range(int(N_traj)): # Sampling N_traj to make gradient more stable
# Initialising some variables
grad = np.zeros(len(t)) # Vector of gradients, one for each sigma(s,a)
r = 0 # reward collected in current trajectory
s = s0 # initial state
while s < sTerminal:
a,i_s,z_s,p = policy(q, sigma, s, t, beta) # take action acc. to policy
s1 = t[i_s][a][1] # (s'|s,a)
r += R[s,s1] # r(s,a,s')
g = -beta * np.multiply(z_s, p) # grad(log(pi(a'|s))) for all a' available from s
g[a] += beta * z_s[a] # grad(log(pi(a|s))) for a taken
grad[i_s] += g # updating vector of gradients for all a
s = s1 # state update for next step
grads += [(r*grad)] # adding it to the list of gradients for all trajectories
grad_cost = (sigma/(sigma0**2) - 1/sigma) # gradient of DKL cost term
grad_mean = np.mean(grads, axis=0) # mean gradient of expected rewards across N_traj
# Updating sigmas for all but depth-n memories
sigma[:-(nTerminal+12)] += a2 * (grad_mean - lmda*grad_cost)[:-(nTerminal+12)]
sigma = np.clip(sigma, 1, 100)
# Compute objective every 1000 trials to plot convergence
if np.mod(ii,1000)==0:
##### Computing and storing objective for convergence ########
# Expected rewards
rewards = []
for i in range(int(1e4)):
s0 = np.random.randint(0,6)
s = s0 # random initial state
r = 0 # reward accumulated so far
while s < sTerminal:
# Draw q, choose next action, get reward, next state
a,ind_s,_,_ = policy(q,sigma,s,t,beta)
s1 = t[ind_s][a][1]
r += R[s,s1]
s = s1
rewards += [r] # reward obtained for the trial
expR += [np.mean(rewards)]
# Cost
mu = q[:-(nTerminal+12)]
S1 = np.diag(np.square(sigma[:-(nTerminal+12)]))
S2 = np.diag(np.square([sigma0]*len(mu)))
cost += [lmda * kl_mvn(mu, S1, mu, S2)]
# Objective
objective += [ (expR[-1] - cost[-1]) ]
# Some pretty printing to satisfy the restless soul
d = {'transitions':tlist, 'q':np.around(q,0), 'sigma':np.around(sigma,1)}
            df = pd.DataFrame(d)
import datetime
from typing import List, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from feature_engine.dataframe_checks import (
_check_contains_inf,
_check_contains_na,
check_X,
)
from feature_engine.discretisation import (
EqualFrequencyDiscretiser,
EqualWidthDiscretiser,
)
from feature_engine.docstrings import (
Substitution,
_feature_names_in_docstring,
_fit_transform_docstring,
_n_features_in_docstring,
)
from feature_engine.selection._docstring import (
_variables_attribute_docstring,
_variables_numerical_docstring,
)
from feature_engine.selection.base_selector import BaseSelector
from feature_engine.variable_manipulation import (
_check_input_parameter_variables,
_find_or_check_numerical_variables,
)
Variables = Union[None, int, str, List[Union[str, int]]]
@Substitution(
confirm_variables=BaseSelector._confirm_variables_docstring,
variables=_variables_numerical_docstring,
variables_=_variables_attribute_docstring,
feature_names_in_=_feature_names_in_docstring,
n_features_in_=_n_features_in_docstring,
fit_transform=_fit_transform_docstring,
)
class DropHighPSIFeatures(BaseSelector):
r"""
    DropHighPSIFeatures drops features whose Population Stability Index (PSI) value is
above a given threshold. The PSI of a numerical feature is an indication of the
shift in its distribution; a feature with high PSI could therefore be considered
unstable.
A bigger PSI value indicates a bigger shift in the feature distribution.
Different thresholds can be used to assess the magnitude of the distribution shift
according to the PSI value. The most commonly used thresholds are:
- Below 10%, the variable has not experienced a significant shift.
- Above 25%, the variable has experienced a major shift.
- Between those two values, the shift is intermediate.
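    For each feature, the PSI is obtained by sorting its values into bins and
    summing, over the bins, (test% - basis%) * ln(test% / basis%), where basis%
    and test% are the fractions of observations falling in each bin in the
    basis and test data sets, respectively.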
To compute the PSI the DropHighPSIFeatures splits the dataset in two:
First and foremost, the user should enter one variable which will be used to guide
the data split. This variable can be of any data type. If the user does not enter a
variable name, DropHighPSIFeatures will use the dataframe index.
Second, the user has the option to specify a proportion of observations to put in
each data set, or alternatively, provide a cut-off value.
If the user specifies a proportion through the `split_frac` parameter, the data will
be sorted to accommodate that proportion. If `split_frac` is 0.5, 50% of the
observations will go to either basis or test sets. If `split_frac` is 0.6, 60% of
the samples will go to the basis data set and the remaining 40% to the test set.
If `split_distinct` is True, the data will be sorted considering unique values in
the selected variables. Check the parameter below for more details.
If the user defines a numeric cut-off value or a specific date using the `cut_off`
parameter, the observations with value <= cut-off will go to the basis data set and
the remaining ones to the test set. For categorical values this means they are
sorted alphabetically and cut accordingly.
If the user passes a list of values in the `cut-off`, the observations with the
values in the list, will go to the basis set, and the remaining ones to the test
set.
More details in the :ref:`User Guide <psi_selection>`.
References
----------
https://scholarworks.wmich.edu/cgi/viewcontent.cgi?article=4249&context=dissertations
Parameters
----------
split_col: string or int, default=None.
The variable that will be used to split the dataset into the basis and test
sets. If None, the dataframe index will be used. `split_col` can be a numerical,
categorical or datetime variable. If `split_col` is a categorical variable, and
the splitting criteria is given by `split_frac`, it will be assumed that the
labels of the variable are sorted alphabetically.
split_frac: float, default=0.5.
The proportion of observations in each of the basis and test dataframes. If
`split_frac` is 0.6, 60% of the observations will be put in the basis data set.
If `split_distinct` is True, the indicated fraction may not be achieved exactly.
See parameter `split_distinct` for more details.
If `cut_off` is not None, `split_frac` will be ignored and the data split based
on the `cut_off` value.
split_distinct: boolean, default=False.
If True, `split_frac` is applied to the vector of unique values in `split_col`
instead of being applied to the whole vector of values. For example, if the
values in `split_col` are [1, 1, 1, 1, 2, 2, 3, 4] and `split_frac` is
0.5, we have the following:
- `split_distinct=False` splits the vector in two equally sized parts:
[1, 1, 1, 1] and [2, 2, 3, 4]. This involves that 2 dataframes with 4
observations each are used for the PSI calculations.
- `split_distinct=True` computes the vector of unique values in `split_col`
([1, 2, 3, 4]) and splits that vector in two equal parts: [1, 2] and [3, 4].
The number of observations in the two dataframes used for the PSI calculations
is respectively 6 ([1, 1, 1, 1, 2, 2]) and 2 ([3, 4]).
cut_off: int, float, date or list, default=None
Threshold to split the dataset based on the `split_col` variable. If int, float
or date, observations where the `split_col` values are <= threshold will
go to the basis data set and the rest to the test set. If `cut_off` is a list,
the observations where the `split_col` values are within the list will go to the
basis data set and the remaining observations to the test set. If `cut_off` is
not None, this parameter will be used to split the data and `split_frac` will be
ignored.
switch: boolean, default=False.
If True, the order of the 2 dataframes used to determine the PSI (basis and
test) will be switched. This is important because the PSI is not symmetric,
i.e., PSI(a, b) != PSI(b, a)).
threshold: float, default = 0.25.
The threshold to drop a feature. If the PSI for a feature is >= threshold, the
feature will be dropped. The most common threshold values are 0.25 (large shift)
and 0.10 (medium shift).
bins: int, default = 10
Number of bins or intervals. For continuous features with good value spread, 10
bins is commonly used. For features with lower cardinality or highly skewed
distributions, lower values may be required.
strategy: string, default='equal_frequency'
If the intervals into which the features should be discretized are of equal
size or equal number of observations. Takes values "equal_width" for equally
spaced bins or "equal_frequency" for bins based on quantiles, that is, bins
with similar number of observations.
min_pct_empty_bins: float, default = 0.0001
Value to add to empty bins or intervals. If after sorting the variable
values into bins, a bin is empty, the PSI cannot be determined. By adding a
small number to empty bins, we can avoid this issue. Note, that if the value
added is too large, it may disturb the PSI calculation.
missing_values: str, default='raise'
Whether to perform the PSI feature selection on a dataframe with missing values.
Takes values 'raise' or 'ignore'. If 'ignore', missing values will be dropped
when determining the PSI for that particular feature. If 'raise' the transformer
will raise an error and features will not be selected.
{variables}
{confirm_variables}
Attributes
----------
features_to_drop_:
List with the features that will be dropped.
{variables_}
psi_values_:
Dictionary containing the PSI value per feature.
cut_off_:
Value used to split the dataframe into basis and test.
This value is computed when not given as parameter.
{feature_names_in_}
{n_features_in_}
Methods
-------
fit:
Find features with high PSI values.
{fit_transform}
transform:
Remove features with high PSI values.
See Also
--------
feature_engine.discretisation.EqualFrequencyDiscretiser
feature_engine.discretisation.EqualWidthDiscretiser
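    Examples
    --------
    A minimal, illustrative sketch on toy data (the column names and parameter
    values below are made up for the example):
    >>> import numpy as np
    >>> import pandas as pd
    >>> from feature_engine.selection import DropHighPSIFeatures
    >>> df = pd.DataFrame({
    ...     "time": np.arange(100),
    ...     "stable": np.random.normal(0, 1, 100),
    ...     "shifted": np.r_[np.random.normal(0, 1, 50), np.random.normal(3, 1, 50)],
    ... })
    >>> psi = DropHighPSIFeatures(split_col="time", split_frac=0.5, bins=5)
    >>> df_t = psi.fit_transform(df)
    >>> psi.psi_values_  # doctest: +SKIP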
"""
def __init__(
self,
split_col: str = None,
split_frac: float = 0.5,
split_distinct: bool = False,
cut_off: Union[None, int, float, datetime.date, List] = None,
switch: bool = False,
threshold: float = 0.25,
bins: int = 10,
strategy: str = "equal_frequency",
min_pct_empty_bins: float = 0.0001,
missing_values: str = "raise",
variables: Variables = None,
confirm_variables: bool = False,
):
if not isinstance(split_col, (str, int, type(None))):
raise ValueError(
f"split_col must be a string an integer or None. Got "
f"{split_col} instead."
)
# split_frac and cut_off can't be None at the same time
if not split_frac and not cut_off:
raise ValueError(
f"cut_off and split_frac cannot be both set to None "
f"The current values are {split_frac, cut_off}. Please "
f"specify a value for at least one of these parameters."
)
# check split_frac only if it will be used.
if split_frac and not cut_off:
if not (0 < split_frac < 1):
raise ValueError(
f"split_frac must be a float between 0 and 1. Got {split_frac} "
f"instead."
)
if not isinstance(split_distinct, bool):
raise ValueError(
f"split_distinct must be a boolean. Got {split_distinct} instead."
)
if not isinstance(switch, bool):
raise ValueError(f"switch must be a boolean. Got {switch} instead.")
if not isinstance(threshold, (float, int)) or threshold < 0:
raise ValueError(f"threshold must be >= 0. Got {threshold} instead.")
if not isinstance(bins, int) or bins <= 1:
raise ValueError(f"bins must be an integer >= 1. Got {bins} instead.")
if strategy not in ["equal_width", "equal_frequency"]:
raise ValueError(
"strategy takes only values equal_width or equal_frequency. Got "
f"{strategy} instead."
)
if not isinstance(min_pct_empty_bins, (float, int)) or min_pct_empty_bins < 0:
raise ValueError(
f"min_pct_empty_bins must be >= 0. Got {min_pct_empty_bins} "
f"instead."
)
if missing_values not in ["raise", "ignore"]:
raise ValueError(
f"missing_values takes only values 'raise' or 'ignore'. Got "
f"{missing_values} instead."
)
if isinstance(variables, list):
if split_col in variables:
raise ValueError(
f"{split_col} cannot be used to split the data and be evaluated at "
f"the same time. Either remove {split_col} from the variables list "
f"or choose another splitting criteria."
)
super().__init__(confirm_variables)
# Check the variables before assignment.
self.variables = _check_input_parameter_variables(variables)
# Set all remaining arguments as attributes.
self.split_col = split_col
self.split_frac = split_frac
self.split_distinct = split_distinct
self.cut_off = cut_off
self.switch = switch
self.threshold = threshold
self.bins = bins
self.strategy = strategy
self.min_pct_empty_bins = min_pct_empty_bins
self.missing_values = missing_values
def fit(self, X: pd.DataFrame, y: pd.Series = None):
"""
Find features with high PSI values.
Parameters
----------
X : pandas dataframe of shape = [n_samples, n_features]
The training dataset.
y : pandas series. Default = None
y is not needed in this transformer. You can pass y or None.
"""
# check input dataframe
X = check_X(X)
# If required exclude variables that are not in the input dataframe
self._confirm_variables(X)
# find numerical variables or check those entered are present in the dataframe
self.variables_ = _find_or_check_numerical_variables(X, self.variables_)
# Remove the split_col from the variables list. It might be added if the
# variables are not defined at initialization.
if self.split_col in self.variables_:
self.variables_.remove(self.split_col)
if self.missing_values == "raise":
# check if dataset contains na or inf
_check_contains_na(X, self.variables_)
_check_contains_inf(X, self.variables_)
# Split the dataframe into basis and test.
basis_df, test_df = self._split_dataframe(X)
# Check the shape of the returned dataframes for PSI calculations.
# The number of observations must be at least equal to the
# number of bins.
if min(basis_df.shape[0], test_df.shape[0]) < self.bins:
raise ValueError(
"The number of rows in the basis and test datasets that will be used "
f"in the PSI calculations must be at least larger than {self.bins}. "
"After slitting the original dataset based on the given cut_off or"
f"split_frac we have {basis_df.shape[0]} samples in the basis set, "
f"and {test_df.shape[0]} samples in the test set. "
"Please adjust the value of the cut_off or split_frac."
)
# Switch basis and test dataframes if required.
if self.switch:
test_df, basis_df = basis_df, test_df
# set up the discretizer
if self.strategy == "equal_width":
bucketer = EqualWidthDiscretiser(bins=self.bins)
else:
bucketer = EqualFrequencyDiscretiser(q=self.bins)
# Compute the PSI by looping over the features
self.psi_values_ = {}
self.features_to_drop_ = []
for feature in self.variables_:
# Discretize the features.
basis_discrete = bucketer.fit_transform(basis_df[[feature]].dropna())
test_discrete = bucketer.transform(test_df[[feature]].dropna())
# Determine percentage of observations per bin
basis_distrib, test_distrib = self._observation_frequency_per_bin(
basis_discrete, test_discrete
)
# Calculate the PSI value
self.psi_values_[feature] = np.sum(
(test_distrib - basis_distrib) * np.log(test_distrib / basis_distrib)
)
# Assess if feature should be dropped
if self.psi_values_[feature] > self.threshold:
self.features_to_drop_.append(feature)
# save input features
self._get_feature_names_in(X)
return self
def _observation_frequency_per_bin(self, basis, test):
"""
Obtain the fraction of observations per interval.
Parameters
----------
basis : pd.DataFrame.
The basis Pandas DataFrame with discretised (i.e., binned) values.
test: pd.DataFrame.
The test Pandas DataFrame with discretised (i.e., binned) values.
Returns
-------
        basis_distrib: pd.Series.
            Basis Pandas Series with the fraction of observations per bin.
        test_distrib: pd.Series.
            Test Pandas Series with the fraction of observations per bin.
"""
# Compute the feature distribution for basis and test
basis_distrib = basis.value_counts(normalize=True)
test_distrib = test.value_counts(normalize=True)
# Align the two distributions by merging the buckets (bins). This ensures
# the number of bins is the same for the two distributions (in case of
# empty buckets).
distributions = (
            pd.DataFrame(basis_distrib)
"""Genetic evaluation of individuals."""
import os
import sys
# import time
from collections import Counter
from itertools import compress
from numba import njit
import pkg_resources
import numpy as np
import pandas as pd
import scipy.linalg
import scipy.stats
def example_data():
"""Provide data to the package."""
cwd = os.getcwd()
stream = pkg_resources.resource_stream(__name__, 'data/chr.txt')
chrmosomedata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/group.txt')
groupdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/effects.txt')
markereffdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/phase.txt')
genodata = pd.read_table(stream, header=None, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/ped.txt')
ped = pd.read_table(stream, header=None, sep=" ")
os.chdir(cwd)
return chrmosomedata, markereffdata, genodata, groupdata, ped
if __name__ == "__main__":
example_data()
@njit
def fnrep2(gen, aaxx, aaxx1):
"""Code phased genotypes into 1, 2, 3 and 4."""
qqq = np.empty((int(gen.shape[0]/2), gen.shape[1]), np.int_)
for i in range(qqq.shape[0]):
for j in range(qqq.shape[1]):
if gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx:
qqq[i, j] = 1
elif gen[2*i, j] == aaxx1 and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 2
elif gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 3
else:
qqq[i, j] = 4
return qqq
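# Genotype coding used throughout this module: 1 = homozygous for the major
# allele, 2 = homozygous for the minor allele, and 3 or 4 = heterozygous
# (major|minor and minor|major phases, respectively).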
def haptogen(gen, progress=False):
"""Convert haplotypes to coded genotypes."""
if progress:
print("Converting phased haplotypes to genotypes")
if gen.shape[1] == 2:
gen = np.array(gen.iloc[:, 1]) # del col containing ID
# convert string to 2D array of integers
gen = [list(gen[i].rstrip()) for i in range(gen.shape[0])]
gen = np.array(gen, int)
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
elif gen.shape[1] > 2:
gen = gen.iloc[:, 1:gen.shape[1]] # del col containing ID
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
return gen
class Datacheck:
"""Check the input data for errors and store relevant info as an object."""
def __init__(self, gmap, meff, gmat, group, indwt, progress=False):
"""
Check input data for errors and store relevant info as class object.
Parameters
----------
gmap : pandas.DataFrame
Index: RangeIndex
Columns:
Name: CHR, dtype: int64; chromosome number
Name: SNPName, dtype: object; marker name
Name: Position: dtype: int64; marker position in bp
Name: group: dtype: float64; marker distance (cM) or reco rates
meff : pandas.DataFrame
Index: RangeIndex
Columns:
Name: trait names: float64; no. of columns = no of traits
gmat : pandas.DataFrame
Index: RangeIndex
Columns:
Name: ID, dtype: int64 or str; identification of individuals
Name: haplotypes, dtype: object; must be biallelic
group : pandas.DataFrame
Index: RangeIndex
Columns:
Name: group, dtype: object; group code of individuals, e.g., M, F
Name: ID, dtype: int64 or str; identification of individuals
indwt : list of index weights for each trait
progress : bool, optional; print progress of the function if True
Returns stored input files
-------
"""
# check: ensures number of traits match size of index weights
indwt = np.array(indwt)
if (meff.shape[1]-1) != indwt.size:
sys.exit('no. of index weights do not match marker effects cols')
# check: ensure individuals' genotypes match group and ID info
id_indgrp = pd.Series(group.iloc[:, 1]).astype(str) # no of inds
if not pd.Series(
pd.unique(gmat.iloc[:, 0])).astype(str).equals(id_indgrp):
sys.exit("ID of individuals in group & genotypic data don't match")
# check: ensure marker names in marker map and effects match
if not (gmap.iloc[:, 1].astype(str)).equals(meff.iloc[:, 0].astype(str)):
print("Discrepancies between marker names")
sys.exit("Check genetic map and marker effects")
# check: ensure marker or allele sub effect are all numeric
meff = meff.iloc[:, 1:meff.shape[1]]
test = meff.apply(
lambda s: pd.to_numeric(s, errors='coerce').notnull().all())
if not test.all():
sys.exit("Marker or allele sub effects contain non-numeric values")
# check: ensure unique maps match no of groups if map more than 1
grpg = pd.unique(group.iloc[:, 0]) # groups of individuals
grp_chrom = gmap.shape[1]-3 # no of unique maps
gmat = haptogen(gmat, progress)
if grp_chrom > 1 and grp_chrom != grpg.size:
sys.exit("no. of unique maps does not match no. of groups")
# check no of markers in genotype and map and marker effects match
no_markers = gmap.shape[0] # no of markers
if no_markers != gmat.shape[1] or no_markers != meff.shape[0]:
sys.exit("markers nos in gen, chrm or marker effects don't match")
# check: ordered marker distance or recombination rates
for grn in range(grp_chrom):
for chrm in pd.unique(gmap.iloc[:, 0]):
mpx = np.array(gmap.iloc[:, 3+grn][gmap.iloc[:, 0] == chrm])
                if not (mpx == np.sort(mpx)).all():  # map must be non-decreasing
sys.exit(
f"Faulty marker map on chr {chrm} for grp {grpg[grn]}")
if progress:
print('Data passed the test!')
print("Number of individuals: ", len(id_indgrp))
print("Number of groups: ", len(grpg), ": ", grpg)
print("Number of specific maps:", grp_chrom)
print("Number of chromosomes: ", len(pd.unique(gmap.iloc[:, 0])))
print("Total no. markers: ", no_markers)
print("Number of trait(s): ", meff.columns.size)
print("Trait name(s) and Index weight(s)")
if meff.columns.size == 1:
print(meff.columns[0], ": ", indwt[0])
elif meff.columns.size > 1:
for i in range(meff.columns.size):
print(meff.columns[i], ": ", indwt[i])
self.gmap = gmap
self.meff = meff
self.gmat = gmat
self.group = group
self.indwt = indwt
def elem_cor(mylist, mprc, ngp, mposunit, method, chrm):
"""Derive pop cov matrix."""
if method == 1: # Bonk et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = np.exp(-2*(np.abs(mprc - mprc[:, None])/100))/4
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (1-(2*mprc))/4
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
elif method == 2: # Santos et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = (-1*(np.abs(mprc - mprc[:, None])/200))+0.25
cutoff = (-1*(50/200))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (-1*(mprc/2))+0.25
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
cutoff = (-1*(0.5/2))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
# append chromosome-specific covariance matrix to list
mylist[int(ngp)].append(tmp)
return mylist
def popcovmat(info, mposunit, method):
"""
Derive population-specific covariance matrices.
Parameters
----------
info : class object
A class object created using the function "datacheck"
mposunit : string
        A string containing "cM" or "reco".
method : int
        An integer with a value of 1 for Bonk et al.'s approach or
        2 for Santos et al.'s approach.
Returns
-------
mylist : list
A list containing group-specific pop covariance matrices for each chr.
"""
if mposunit not in ("cM", "cm", "CM", "Cm", "reco", "RECO"):
sys.exit("marker unit should be either cM or reco")
# unique group name for naming the list if map is more than 1
probn = pd.unique(info.group.iloc[:, 0].astype(str)).tolist()
chromos = pd.unique(info.gmap.iloc[:, 0]) # chromosomes
no_grp = info.gmap.shape[1]-3 # no of maps
mylist = [] # list stores chromosome-wise covariance matrix
for ngp in range(no_grp):
mylist.append([])
# marker position in cM or recombination rates
grouprecodist = info.gmap.iloc[:, 3+ngp]
for chrm in chromos:
mpo = np.array(grouprecodist[info.gmap.iloc[:, 0] == (chrm)])
elem_cor(mylist, mpo, ngp, mposunit, method, chrm)
if no_grp > 1:
# if map is more than one, name list using group names
mylist = dict(zip(probn, mylist))
return mylist
@njit
def makemems(gmat, meff):
"""Set up family-specific marker effects (Mendelian sampling)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 4:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 3:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
@njit
def makemebv(gmat, meff):
"""Set up family-specific marker effects (GEBV)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 2:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 1:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
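# Descriptive note: makemems assigns non-zero effects only at heterozygous loci
# (codes 3 and 4), i.e. the Mendelian sampling part, whereas makemebv assigns
# non-zero effects only at homozygous loci (codes 1 and 2), used for the GEBVs.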
def traitspecmatrices(gmat, meff):
"""Store trait-specific matrices in a list."""
notr = meff.shape[1] # number of traits
slist = [] # list stores trait-specific matrices
slist.append([])
for i in range(notr):
# specify data type for numba
mefff = np.array(meff.iloc[:, i], float)
matrix_ms = makemems(gmat, mefff)
slist[0].append(matrix_ms)
return slist
def namesdf(notr, trait_names):
"""Create names of dataframe columns for Mendelian co(var)."""
tnn = np.zeros((notr, notr), 'U20')
tnn = np.chararray(tnn.shape, itemsize=30)
for i in range(notr):
for trt in range(notr):
if i == trt:
tnn[i, trt] = str(trait_names[i])
elif i != trt:
tnn[i, trt] = "{}_{}".format(trait_names[i], trait_names[trt])
colnam = tnn[np.tril_indices(notr)]
return colnam
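# e.g. (illustrative) namesdf(2, ["A", "B"]) returns the lower-triangle labels
# [b'A', b'B_A', b'B'] (bytes, later converted with .decode('utf-8')).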
def mrmmult(temp, covmat):
"""Matrix multiplication (MRM' or m'Rm)."""
return temp @ covmat @ temp.T
def dgmrm(temp, covmat):
"""Matrix multiplication (MRM') for bigger matrices."""
temp1111 = scipy.linalg.blas.dgemm(alpha=1.0, a=temp, b=covmat)
return scipy.linalg.blas.dgemm(alpha=1.0, a=temp1111, b=temp.T)
def progr(itern, total):
"""Print progress of a task."""
fill, printend, prefix, suffix = '█', "\r", 'Progress:', 'Complete'
deci, length = 0, 50
percent = ("{0:." + str(deci) + "f}").format(100 * (itern / float(total)))
filledlen = int(length * itern // total)
bars = fill * filledlen + '-' * (length - filledlen)
print(f'\r{prefix} |{bars}| {percent}% {suffix}', end=printend)
if itern == total:
print()
def subindcheck(info, sub_id):
"""Check if inds provided in pd.DataFrame (sub_id) are in group data."""
sub_id = pd.DataFrame(sub_id).reset_index(drop=True)
if sub_id.shape[1] != 1:
sys.exit("Individuals' IDs (sub_id) should be provided in one column")
numbers = info.group.iloc[:, 1].astype(str).tolist()
sub_id = sub_id.squeeze().astype(str).tolist()
aaa = [numbers.index(x) if x in numbers else None for x in sub_id]
aaa = np.array(aaa)
if len(aaa) != len(sub_id):
sys.exit("Some individual ID could not be found in group data")
return aaa
def msvarcov_g_st(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for single trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
msvmsc = np.empty((matsub.shape[0], 1))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr, notr)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
msvmsc[i, 0] = mscov
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
msvmsc.columns = info.meff.columns
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g_mt(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for multiple traits."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
mad = len(np.zeros((notr+1, notr+1))[np.tril_indices(notr+1)])
msvmsc = np.empty((matsub.shape[0], mad))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr+1, notr+1)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr+1, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
temp[notr, :] = np.matmul(info.indwt.T, temp[0:notr, :])
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
msvmsc[i, :] = mscov[np.tril_indices(notr+1)]
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
tnames = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, tnames).decode('utf-8')
msvmsc.columns = colnam
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g(info, covmat, sub_id, progress=False):
"""
Derive Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
info : class object
A class object created using the function "datacheck"
covmat : A list of pop cov matrices created using "popcovmat" function
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
progress : bool, optional; print progress of the function if True
Returns
-------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
Note: If sub_id is None, Mendelian (co-)variance will be estimated for
all individuals. Otherwise, Mendelian (co-)variance will be estimated for
the individuals in sub_id
"""
notr = info.meff.columns.size
if notr == 1:
msvmsc = msvarcov_g_st(info, covmat, sub_id, progress)
elif notr > 1:
msvmsc = msvarcov_g_mt(info, covmat, sub_id, progress)
return msvmsc
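# Illustrative end-to-end sketch (assumes the example files shipped with the
# package and that their map distances are given in cM):
#   gmap, meff, gmat, group, ped = example_data()
#   info = Datacheck(gmap, meff, gmat, group, indwt=[1.0] * (meff.shape[1] - 1))
#   covmat = popcovmat(info, mposunit="cM", method=1)
#   msvmsc = msvarcov_g(info, covmat, sub_id=None)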
def array2sym(array):
"""Convert array to stdized symm mat, and back to array without diags."""
dfmsize = array.size
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
iii, jjj = np.tril_indices(notr)
mat = np.empty((notr, notr), float)
mat[iii, jjj], mat[jjj, iii] = array, array
mat = np.array(mat)
mat1 = cov2corr(mat)
return np.array(mat1[np.tril_indices(notr, k=-1)])
def msvarcov_gcorr(msvmsc):
"""
Standardize Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
created using msvarcov_g function
Returns
-------
dfcor : pandas.DataFrame
containing standardized Mendelian sampling (co)variance
"""
if msvmsc.columns.size == 3:
sys.exit("Correlation cannot be derived for a single trait")
dfm = msvmsc.iloc[:, 2:msvmsc.shape[1]] # exclude ID and group
dfmsize = dfm.shape[1]
# derive number of traits
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
# standardize covariance between traits
dfcor = dfm.apply(array2sym, axis=1)
# extract column names
listnames = dfm.columns.tolist()
cnames = [x for x in listnames if "_" in x]
# convert pd.series of list to data frame
dfcor = pd.DataFrame.from_dict(dict(zip(dfcor.index, dfcor.values))).T
dfcor.columns = cnames
# insert ID and group info
dfcor = [pd.DataFrame(msvmsc.iloc[:, 0:2]), dfcor] # add ID and GRP
dfcor = pd.concat(dfcor, axis=1)
return dfcor
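# For example (illustrative): msvarcov_gcorr(msvmsc) keeps the ID and Group
# columns of msvmsc and returns only the standardized between-trait terms,
# i.e. the columns whose names contain an underscore.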
def calcgbv(info, sub_id):
"""Calculate breeding values for each trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
no_individuals = matsub.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
gbv = np.zeros((no_individuals, notr))
mefff = np.array(info.meff.iloc[:, 0], float) # type spec for numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, 0] = matrix_me.sum(axis=1) # sum all effects
gbv = pd.DataFrame(gbv)
gbv.columns = trait_names
elif notr > 1:
gbv = np.zeros((no_individuals, notr+1))
for i in range(notr):
mefff = np.array(info.meff.iloc[:, i], float) # type spec 4 numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, i] = matrix_me.sum(axis=1) # sum all effects for each trait
gbv[:, notr] = gbv[:, notr] + info.indwt[i]*gbv[:, i] # Agg gen
gbv = pd.DataFrame(gbv)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
gbv.columns = colnames
gbv.insert(0, "ID", idn, True) # insert ID
gbv.insert(1, "Group", groupsex, True) # insert group
return gbv
def calcprob(info, msvmsc, thresh):
"""Calculate the probability of breeding top individuals."""
aaa = subindcheck(info, pd.DataFrame(msvmsc.iloc[:, 0]))
gbvall = calcgbv(info, None) # calc GEBV for all inds used by thresh
gbv = gbvall.iloc[aaa, :].reset_index(drop=True) # GEBV matching msvmsc
no_individuals = gbv.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
probdf = np.zeros((no_individuals, notr))
ttt = np.quantile(gbvall.iloc[:, (0+2)], q=1-thresh) # threshold
probdf[:, 0] = 1 - scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (0+2)], scale=np.sqrt(msvmsc.iloc[:, 0+2]))
probdf = pd.DataFrame(probdf)
probdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh) # threshold
probdf = np.zeros((no_individuals, notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
ttt = np.quantile(gbvall.iloc[:, (i+2)], q=1-thresh) # threshold
probdf[:, i] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (i+2)], scale=np.sqrt(
msvmsc.iloc[:, (t_ind[i])+2]))
probdf[:, i] = np.nan_to_num(probdf[:, i]) # convert Inf to zero
probdf[:, i] = 1 - probdf[:, i]
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh)
probdf[:, notr] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (notr+2)], scale=np.sqrt(
msvmsc["AG"]))
probdf[:, notr] = np.nan_to_num(probdf[:, notr]) # Agg
probdf[:, notr] = 1 - probdf[:, notr]
probdf = pd.DataFrame(probdf) # convert matrix to dataframe
colnames = np.concatenate((trait_names, "ABV"), axis=None)
probdf.columns = colnames
probdf = [pd.DataFrame(gbv.iloc[:, 0:2]), probdf] # add ID and GRP
probdf = pd.concat(probdf, axis=1)
return probdf
def calcindex(info, msvmsc, const):
"""Calculate the index if constant is known."""
sub_id = pd.DataFrame(msvmsc.iloc[:, 0])
gbv = calcgbv(info, sub_id) # calc GEBV
no_individuals = gbv.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size
if notr == 1:
indexdf = np.zeros((no_individuals, notr))
indexdf[:, 0] = (gbv.iloc[:, (0+2)]/2) + np.sqrt(
msvmsc.iloc[:, 0+2])*const
indexdf = pd.DataFrame(indexdf)
indexdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
indexdf = np.zeros((no_individuals, notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
indexdf[:, i] = (gbv.iloc[:, (i+2)]/2) + np.sqrt(
msvmsc.iloc[:, (t_ind[i]+2)])*const
indexdf[:, notr] = (gbv.iloc[:, (notr+2)]/2) + np.sqrt(
msvmsc["AG"])*const
indexdf = pd.DataFrame(indexdf)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
indexdf.columns = colnames
indexdf = [pd.DataFrame(gbv.iloc[:, 0:2]), indexdf] # add ID and GRP
indexdf = pd.concat(indexdf, axis=1)
return indexdf
def selstrat_g(selstrat, info, sub_id, msvmsc, throrconst):
"""
    Calc selection criteria (GEBV, PBTI, or index) using the gametic approach.
Parameters
----------
selstrat : str
A str containing any of GEBV, PBTI or index
info : class object
A class object created using the function "datacheck"
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
msvmsc : pandas.DataFrame
DF created using the function "msvarcov_g"
throrconst : float
If selstrat is PBTI, a throrconst of value 0.05 sets threshold at
top 5% of GEBV. If selstrat is index, throrconst is a constant.
If selstrat is GEBV, throrconst can be any random value.
Returns
-------
data : pandas.DataFrame
Index: RangeIndex
Columns:
ID, Group, trait names and Aggregate Breeding Value (ABV)
Note: If selstrat is GEBV, None may be used for throrconst and msvmsc.
If sub_id is None and selstrat is GEBV, GEBVs will be estimated for all
individuals. However, if selstrat is not GEBV, the chosen selection
criterion will be estimated for all individuals in msvmsc data frame.
"""
if selstrat in ("PBTI", "pbti", "index", "INDEX") and msvmsc is None:
sys.exit("Provide Mendelian (co-)variance dataframe: 'msvmsc'")
if selstrat in ("PBTI", "pbti", "index", "INDEX") and throrconst is None:
sys.exit("Provide value for throrconst parameter")
if selstrat not in ('GEBV', 'gebv', 'PBTI', 'pbti', 'index', 'INDEX'):
sys.exit("selection strategy should be one of GEBV, PBTI or INDEX")
if selstrat in ('GEBV', 'gebv'):
data = calcgbv(info, sub_id)
elif selstrat in ('PBTI', 'pbti'):
if throrconst > 1 or throrconst < 0:
sys.exit("value must be in the range of 0 and 1")
data = calcprob(info, msvmsc, throrconst)
elif selstrat in ('index', 'INDEX'):
data = calcindex(info, msvmsc, throrconst)
return data
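# For example (illustrative): selstrat_g("GEBV", info, None, None, None)
# returns GEBVs for all individuals, whereas
# selstrat_g("PBTI", info, sub_id, msvmsc, 0.05) returns, for the individuals
# in msvmsc, the probability of breeding individuals in the top 5% of GEBV.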
def cov2corr(cov):
"""Convert covariance to correlation matrix."""
cov = np.asanyarray(cov)
std_ = np.sqrt(np.diag(cov))
with np.errstate(invalid='ignore'):
corr = cov / np.outer(std_, std_)
return corr
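# e.g. cov2corr(np.array([[4., 2.], [2., 9.]])) gives
# [[1., 0.333...], [0.333..., 1.]], since the off-diagonal term is 2 / (2 * 3).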
def aggen(us_ind, no_markers, slst, indwt):
"""Set up additive effects matrix of aggregate genotype."""
mmfinal = np.empty((len(us_ind), no_markers))
xxx = 0
for iii in us_ind:
tmpmt1 = np.array([slst[0][trt][iii, :] for trt in range(indwt.size)])
mmfinal[xxx, :] = np.matmul(indwt.transpose(), tmpmt1)
xxx = xxx + 1
return mmfinal
def chr_int(xxxxx):
"""Format chromomosome of interest parameter."""
if 'all' in xxxxx:
xxxxx = 'all'
elif 'none' in xxxxx:
xxxxx = 'none'
else:
xxxxx = np.array([int(i) for i in xxxxx])
return xxxxx
def writechr(covtmpx, chrinterest, chrm, trtnam, probx, stdsim):
"""Write matrices to file."""
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx)
np.save(chrfile1, covtmpx)
elif chrm in chrinterest:
chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfile1, covtmpx)
if stdsim:
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfilec, cov2corr(covtmpx))
elif chrm in chrinterest:
chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfilec, cov2corr(covtmpx))
def writechrunspec(covtmpx, chrinterest, chrm, trtnam, stdsim):
"""Write matrices to file."""
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfile1 = "{}/Sim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm)
np.save(chrfile1, covtmpx)
elif chrm in chrinterest:
chrfile1 = "{}/Sim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfile1, covtmpx)
if stdsim:
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfilec = "{}/Stdsim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfilec, cov2corr(covtmpx))
elif chrm in chrinterest:
chrfilec = "{}/Stdsim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfilec, cov2corr(covtmpx))
def grtonum(numnx):
"""Map chracters to numeric (0-no of groups)."""
numnx = numnx.reset_index(drop=True)
probn = pd.unique(numnx).tolist()
alt_no = np.arange(0, len(probn), 1)
noli = numnx.tolist()
numnx = np.array(list(map(dict(zip(probn, alt_no)).get, noli, noli)))
return numnx, probn
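# e.g. (illustrative) grtonum(pd.Series(["M", "F", "M"])) returns
# (array([0, 1, 0]), ['M', 'F']).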
def datret(info, rw_nms, pfnp, us_ind, slist, covmat, cov_indxx, stdsim,
progress):
"""Return sim mat based on aggregate genotypes."""
snpindexxxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
if info.meff.shape[1] == 1 and not stdsim:
mat = cov_indxx
elif info.meff.shape[1] == 1 and stdsim:
mat = cov2corr(cov_indxx)
elif info.meff.shape[1] > 1:
if info.gmap.shape[1]-3 > 1:
rw_nms = pd.DataFrame(rw_nms)
rw_nms.to_csv(f"order of inds in mat grp {pfnp}.csv", index=False)
if progress:
print('Creating similarity matrix based on aggregate genotype')
progr(0, max(pd.unique(info.gmap.iloc[:, 0])))
tmpmt1 = aggen(us_ind, info.gmap.shape[0], slist, info.indwt)
# stores ABV covariance btw inds
mat = np.zeros((len(us_ind), len(us_ind)))
# loop over chromososomes
for chrm in pd.unique(info.gmap.iloc[:, 0]):
s_ind = np.array(snpindexxxx[info.gmap.iloc[:, 0] == (chrm)])
if info.gmap.shape[1]-3 == 1:
covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[0][chrm-1]))
else:
covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[pfnp][chrm-1]))
mat = mat + covtmpx
if progress:
progr(chrm, max(pd.unique(info.gmap.iloc[:, 0])))
if stdsim:
mat = cov2corr(mat)
return mat
def mrmcals(info, us_ind, stdsim, slist, covmat, probn, chrinterest, save,
progress):
"""Compute similarity matrix for each chromosome."""
if progress:
progr(0, info.meff.columns.size)
for i in range(info.meff.columns.size):
cov_indxx = np.zeros((len(us_ind), len(us_ind)))
for chrm in pd.unique(info.gmap.iloc[:, 0]):
s_ind = np.array(np.arange(0, info.gmap.shape[0], 1
)[info.gmap.iloc[:, 0] == (chrm)])
if info.gmap.shape[1]-3 == 1: # map is 1
covtmpx = abs(dgmrm(slist[0][i][:, s_ind], covmat[0][chrm-1]))
else: # if map is more than 1
covtmpx = abs(dgmrm(slist[0][i][us_ind[:, None], s_ind],
covmat[probn][chrm-1]))
cov_indxx = cov_indxx + covtmpx # sums up chrm-specific sims
if len(pd.unique(info.group.iloc[:, 0].astype(str))) == 1:
writechrunspec(covtmpx, chrinterest, chrm,
info.meff.columns[i], stdsim)
else:
writechr(covtmpx, chrinterest, chrm, info.meff.columns[i],
probn, stdsim) # write sim to file
if stdsim:
if save is True:
if info.gmap.shape[1]-3 == 1:
covxfile = "{}/Stdsim mat for {}.npy".format(
os.getcwd(), info.meff.columns[i])
else:
covxfile = "{}/Stdsim mat for {} grp {}.npy".format(
os.getcwd(), info.meff.columns[i], probn)
np.save(covxfile, cov2corr(cov_indxx)) # write std sim mats
else:
if save is True:
if info.gmap.shape[1]-3 == 1:
covxfile = "{}/Sim mat for {}.npy".format(
os.getcwd(), info.meff.columns[i])
else:
covxfile = "{}/Sim mat for {} grp {}.npy".format(
os.getcwd(), info.meff.columns[i], probn)
np.save(covxfile, cov_indxx) # write sim matrices
if progress:
progr(i + 1, info.meff.columns.size)
return cov_indxx
def simmat_g(info, covmat, sub_id, chrinterest, save=False, stdsim=False,
progress=False):
"""
Compute similarity matrices using gametic approach.
Parameters
----------
info : class object
A class object created using the function "datacheck"
covmat : A list of pop cov matrices created using "popcovmat" function
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
chrinterest : str or list of int
list of chromosome numbers of interest or str with "all" or "none"
save : bool, optional; write trait-specific sim mats to file if true
    stdsim : bool, optional; standardize the similarity matrices (cov2corr) if true
progress : bool, optional; print progress of the task if true
Returns
-------
    multgrpcov : list containing similarity matrices for each group
"""
if sub_id is None:
inda = np.arange(0, info.gmat.shape[0], 1)
sub_id = pd.DataFrame(info.group.iloc[inda, 1])
aaa = subindcheck(info, sub_id)
else:
aaa = subindcheck(info, sub_id)
chrinterest = chr_int(chrinterest)
slist = traitspecmatrices(info.gmat[aaa, :], info.meff) # trt-spec mat
grp = info.gmap.shape[1]-3
if (grp == 1 and len(pd.unique(info.group.iloc[:, 0].astype(str))) > 1):
print("The same map will be used for all groups")
numbers, probn = grtonum(info.group.iloc[aaa, 0].astype(str))
multgrpcov = []
for gnp in range(grp):
multgrpcov.append([])
if grp == 1:
us_ind = np.arange(start=0, stop=info.gmat[aaa, :].shape[0],
step=1)
else:
tng = numbers == gnp
us_ind = np.array(list(compress(np.arange(0, len(tng), 1),
tng))).T
print("Processing group ", probn[gnp])
rw_nms = info.group.iloc[aaa, 1].reset_index(drop=True).astype(
str)[us_ind]
cov_indxx = mrmcals(info, us_ind, stdsim, slist, covmat, probn[gnp],
chrinterest, save, progress)
multgrpcov[int(gnp)].append(
datret(info, rw_nms, probn[gnp], us_ind, slist, covmat,
cov_indxx, stdsim, progress))
if len(probn) == 1:
break
if grp > 1 and len(probn):
multgrpcov = dict(zip(probn, multgrpcov))
return multgrpcov
def submsvmsc(msvmsc, sub_idz):
"""Extract index in msvmsc data frame."""
sub_idz = pd.DataFrame(sub_idz)
numbs = msvmsc.iloc[:, 0].astype(str).tolist()
sub_idz = sub_idz.reset_index(drop=True).squeeze()
mal = sub_idz.iloc[:, 0].astype(str).tolist()
fem = sub_idz.iloc[:, 1].astype(str).tolist()
if sub_idz is not None:
for i in mal:
if i not in numbs:
sys.exit("Individuals are not in msvmsc parameter")
for i in fem:
if i not in numbs:
sys.exit("Individuals are not in msvmsc parameter")
mal1 = [numbs.index(x) if x in numbs else None for x in mal]
fem1 = [numbs.index(x) if x in numbs else None for x in fem]
return mal1, fem1
def pot_parents(info, data, selmale, selfm):
"""Subset individuals of interest."""
trait_names = info.meff.columns
if trait_names.size == 1:
datamale = data[data.iloc[:, 1] == selmale[0]]
pos = subindcheck(info, pd.DataFrame(datamale.iloc[:, 0]))
datamale.insert(0, "pos", pos, True)
no_sire = int(datamale.shape[0] * selmale[1])
datamale = datamale.sort_values(
by=[trait_names[0]], ascending=False).iloc[0:no_sire, :]
datafemale = data[data.iloc[:, 1] == selfm[0]]
pos = subindcheck(info, pd.DataFrame(datafemale.iloc[:, 0]))
datafemale.insert(0, "pos", pos, True)
no_dam = int(datafemale.shape[0] * selfm[1])
datafemale = datafemale.sort_values(
by=[trait_names[0]], ascending=False).iloc[0:no_dam, :]
elif trait_names.size > 1:
datamale = data[data.iloc[:, 1] == selmale[0]]
pos = subindcheck(info, pd.DataFrame(datamale.iloc[:, 0]))
datamale.insert(0, "pos", pos, True)
no_sire = int(datamale.shape[0] * selmale[1])
datamale = datamale.sort_values(
by=['ABV'], ascending=False).iloc[0:no_sire, :]
datafemale = data[data.iloc[:, 1] == selfm[0]]
pos = subindcheck(info, pd.DataFrame(datafemale.iloc[:, 0]))
datafemale.insert(0, "pos", pos, True)
no_dam = int(datafemale.shape[0] * selfm[1])
datafemale = datafemale.sort_values(
by=['ABV'], ascending=False).iloc[0:no_dam, :]
matlist = np.array(np.meshgrid(
datamale.iloc[:, 0], datafemale.iloc[:, 0])).T.reshape(-1, 2)
ids = np.array(np.meshgrid(
datamale.iloc[:, 1], datafemale.iloc[:, 1])).T.reshape(-1, 2)
if trait_names.size == 1:
matndat = pd.DataFrame(index=range(matlist.shape[0]), columns=range(
4+trait_names.size))
else:
matndat = pd.DataFrame(
index=range(matlist.shape[0]), columns=range(5+trait_names.size))
matndat.iloc[:, [0, 1]] = ids
matndat.iloc[:, [2, 3]] = matlist
return matndat
def selsgebv(notr, matndat, gbv, maxmale):
"""Calculate breeding values for each trait (zygote)."""
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
elif notr > 1:
matndat.iloc[:, 4:(5+notr)] = (np.array(
gbv.iloc[mal, 2:(notr+3)]) + np.array(gbv.iloc[fem, 2:(notr+3)]))/2
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = mmat
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def selspbtizyg(notr, gbv, matndat, msvmsc, throrconst, maxmale):
"""Calculate prob of breeding top inds (zygote)."""
mal1, fem1 = submsvmsc(msvmsc, pd.DataFrame(matndat.iloc[:, 0:2]))
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
ttt = np.quantile(gbv.iloc[:, 0+2], q=1-throrconst)
msvtemp = np.array(msvmsc.iloc[mal1, 0+2]) + np.array(
msvmsc.iloc[fem1, 0+2])
matndat.iloc[:, 4] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4], scale=np.sqrt(
msvtemp))
elif notr > 1:
trait_names = gbv.columns[2:2+notr]
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
matndat.iloc[:, 4+i] = (
np.array(gbv.iloc[mal, (i+2)]) + np.array(
gbv.iloc[fem, (i+2)]))/2
ttt = np.quantile(gbv.iloc[:, 2+i], q=1-throrconst)
msvtemp = np.array(msvmsc.iloc[mal1, t_ind[i]+2]) + np.array(
msvmsc.iloc[fem1, t_ind[i]+2])
matndat.iloc[:, 4+i] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4+i], scale=np.sqrt(msvtemp))
matndat.iloc[:, 4+notr] = (
np.array(gbv.iloc[mal, (notr+2)]) + np.array(
gbv.iloc[fem, (notr+2)]))/2
ttt = np.quantile(gbv.iloc[:, 2+notr], q=1-throrconst)
msvtemp = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
matndat.iloc[:, 4+notr] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4+notr], scale=np.sqrt(msvtemp.ravel()))
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = mmat
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def selsindex(notr, gbv, matndat, msvmsc, throrconst, maxmale):
"""Calculate the index if constant is known (zygote)."""
mal1, fem1 = submsvmsc(msvmsc, pd.DataFrame(matndat.iloc[:, 0:2]))
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
msvtemp = np.array(msvmsc.iloc[mal1, 0+2]) + np.array(
msvmsc.iloc[fem1, 0+2])
matndat.iloc[:, 4] = matndat.iloc[:, 4] + np.sqrt(msvtemp)*throrconst
elif notr > 1:
trait_names = gbv.columns[2:2+notr]
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
matndat.iloc[:, 4+i] = (
np.array(gbv.iloc[mal, (i+2)]) + np.array(
gbv.iloc[fem, (i+2)]))/2
msvtemp = np.array(msvmsc.iloc[mal1, t_ind[i]+2]) + np.array(
msvmsc.iloc[fem1, t_ind[i]+2])
matndat.iloc[:, 4+i] = matndat.iloc[:, 4+i] + np.sqrt(
msvtemp)*throrconst
matndat.iloc[:, 4+notr] = (
np.array(gbv.iloc[mal, (notr+2)]) + np.array(
gbv.iloc[fem, (notr+2)]))/2
msvtemp = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
matndat.iloc[:, 4+notr] = matndat.iloc[:, 4+notr] + (
np.sqrt(msvtemp)*throrconst).ravel()
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = pd.DataFrame(mmat)
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def subindcheckzyg(info, sub_idz):
"""Check sex and if matepairs provided in sub_idz are in group data."""
numbs = info.group.iloc[:, 1].astype(str).tolist()
sub_idz = pd.DataFrame(sub_idz).reset_index(drop=True).squeeze()
mal = sub_idz.iloc[:, 0].astype(str).tolist()
fem = sub_idz.iloc[:, 1].astype(str).tolist()
mal1 = [numbs.index(x) if x in numbs else None for x in mal]
fem1 = [numbs.index(x) if x in numbs else None for x in fem]
if len(pd.unique(info.group.iloc[mal1, 0])) != 1:
sys.exit("Group class in sub_idz is not unique to ID of males")
if len(pd.unique(info.group.iloc[fem1, 0])) != 1:
sys.exit("Group class in sub_idz is not unique to ID of females")
idn = sub_idz.reset_index(drop=True)
mgp = list(set(info.group.iloc[mal1, 0]))
fgp = list(set(info.group.iloc[fem1, 0]))
if len(mgp) > 1 or len(fgp) > 1:
sys.exit("multiple sexes detected in data")
probn = [mgp[0], fgp[0]]
return mal1, fem1, idn, probn
def calcgbvzygsub(info, sub_idz):
"""Calc breeding values for matepairs."""
mal1, fem1, idn, _ = subindcheckzyg(info, sub_idz)
no_individuals, trait_names = idn.shape[0], info.meff.columns
notr = trait_names.size
if notr == 1:
gbv = np.zeros((no_individuals, notr))
mefff = np.array(info.meff.iloc[:, 0], float)
matrix_me1 = makemebv(info.gmat[mal1, :], mefff)
matrix_me2 = makemebv(info.gmat[fem1, :], mefff)
gbv[:, 0] = (matrix_me1.sum(axis=1) + matrix_me2.sum(axis=1))/2
gbv = pd.DataFrame(gbv)
gbv.columns = trait_names
elif notr > 1:
gbv = np.zeros((no_individuals, notr+1))
for i in range(notr):
mefff = np.array(info.meff.iloc[:, i], float)
matrix_me1 = makemebv(info.gmat[mal1, :], mefff)
matrix_me2 = makemebv(info.gmat[fem1, :], mefff)
gbv[:, i] = (matrix_me1.sum(axis=1) + matrix_me2.sum(axis=1))/2
gbv[:, notr] = gbv[:, notr] + info.indwt[i]*gbv[:, i]
gbv = pd.DataFrame(gbv)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
gbv.columns = colnames
gbv.insert(0, "FemaleIndex", fem1, True) # insert ID
gbv.insert(0, "MaleIndex", mal1, True) # insert ID
gbv.insert(0, "FemaleID", idn.iloc[:, 1], True) # insert ID
gbv.insert(0, "MaleID", idn.iloc[:, 0], True) # insert ID
return gbv
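# Illustrative note (not part of the original code): the zygote breeding value
# returned above is the midparent average of the parents' marker-effect sums.
# With hypothetical allele counts and marker effects:
#   meff = np.array([0.10, -0.20, 0.05])   # assumed marker effects
#   sire, dam = np.array([2, 1, 0]), np.array([1, 1, 2])
#   zygote_bv = (sire @ meff + dam @ meff) / 2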
def calcprobzygsub(info, msvmsc, thresh, sub_idz):
"""Calculate the probability of breeding top individuals."""
subindcheckzyg(info, sub_idz)
mal1, fem1 = submsvmsc(msvmsc, sub_idz)
gbv = calcgbvzygsub(info, sub_idz)
trait_names = info.meff.columns # traits names
notr = trait_names.size
gbvall = calcgbv(info, None)
if notr == 1:
probdf = np.zeros((gbv.shape[0], notr))
ttt = np.quantile(gbvall.iloc[:, (0+2)], q=1-thresh)
msvmsc111 = np.array(msvmsc.iloc[mal1, (0+2)]) + np.array(
msvmsc.iloc[fem1, (0+2)])
probdf[:, 0] = 1 - scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (0+4)], scale=np.sqrt(msvmsc111))
probdf = pd.DataFrame(probdf)
probdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
probdf = np.zeros((gbv.shape[0], notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
ttt = np.quantile(gbvall.iloc[:, (i+2)], q=1-thresh)
msvmsc111 = np.array(msvmsc.iloc[mal1, (t_ind[i])+2]) + np.array(
msvmsc.iloc[fem1, (t_ind[i])+2])
probdf[:, i] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (i+4)], scale=np.sqrt(msvmsc111))
probdf[:, i] = np.nan_to_num(probdf[:, i])
probdf[:, i] = 1 - probdf[:, i]
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh)
msvmsc111 = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
probdf[:, notr] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (notr+4)], scale=np.sqrt(msvmsc111.ravel()))
probdf[:, notr] = np.nan_to_num(probdf[:, notr])
probdf[:, notr] = 1 - probdf[:, notr]
probdf = pd.DataFrame(probdf) # convert matrix to dataframe
colnames = np.concatenate((trait_names, "ABV"), axis=None)
probdf.columns = colnames
probdf = pd.concat([gbv.iloc[:, 0:4], probdf], axis=1)
return probdf
def calcindexzygsub(info, msvmsc, const, sub_idz):
"""Calc index matepairs if constant is known."""
subindcheckzyg(info, sub_idz)
mal1, fem1 = submsvmsc(msvmsc, sub_idz)
gbv = calcgbvzygsub(info, sub_idz)
trait_names = info.meff.columns # traits names
notr = trait_names.size
if notr == 1:
indexdf = np.zeros((gbv.shape[0], notr))
msvmsc111 = np.array(msvmsc.iloc[mal1, (0+2)]) + np.array(
msvmsc.iloc[fem1, (0+2)])
indexdf[:, 0] = gbv.iloc[:, (0+4)] + np.sqrt(msvmsc111)*const
indexdf = pd.DataFrame(indexdf)
indexdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
indexdf = np.zeros((gbv.shape[0], notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
msvmsc111 = np.array(msvmsc.iloc[mal1, (t_ind[i])+2]) + np.array(
msvmsc.iloc[fem1, (t_ind[i])+2])
indexdf[:, i] = gbv.iloc[:, (i+4)] + np.sqrt(msvmsc111)*const
msvmsc111 = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
indexdf[:, notr] = gbv.iloc[:, (notr+4)] + (
np.sqrt(msvmsc111)*const).ravel()
indexdf = | pd.DataFrame(indexdf) | pandas.DataFrame |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Display functions for leaderboard entries."""
from typing import Sequence
from neural_testbed.leaderboard import score
import numpy as np
import pandas as pd
SCORE_COL = 'kl_estimate'
DISPLAY_COLS = (
'agent_name', 'normalized_kl', 'normalized_stderr',
'mean_test_acc', 'mean_train_seconds', 'mean_evaluation_seconds'
)
def _stderr(x):
return np.std(x) / np.sqrt(len(x))
def _extract_tau_data(data: score.LeaderboardData, tau: int) -> pd.DataFrame:
assert tau in data.df.tau.unique()
return data.df[data.df.tau == tau].copy()
def _compute_mean(df: pd.DataFrame, column_name: str):
"""Computes mean running time based on column column_name."""
if column_name not in df.columns:
df[column_name] = 0
mean_df = (df.groupby('agent_name')[column_name]
.agg([np.mean])
.rename({'mean': 'mean_' + column_name}, axis=1)
.reset_index())
return mean_df
def _compute_stderr(df: pd.DataFrame, num_seed_per_class: int = 10):
"""Computes stderr by grouping the problems based on their seeds."""
assert 'seed' in df.columns
df['seed_class'] = df['seed'].apply(lambda x: x % num_seed_per_class)
kl_seed_df = df.groupby(['agent_name',
'seed_class'])['kl_estimate'].mean().reset_index()
stderr_df = kl_seed_df.groupby(['agent_name'
])['kl_estimate'].agg([_stderr]).reset_index()
stderr_df = stderr_df.rename({'_stderr': 'stderr_kl'}, axis=1)
return stderr_df
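# Example (not part of the original module): a quick check of the seed-class
# standard error with a toy frame; column names follow the ones assumed here.
#   toy = pd.DataFrame({'agent_name': ['mlp'] * 4,
#                       'seed': [0, 1, 2, 3],
#                       'kl_estimate': [0.10, 0.12, 0.08, 0.11]})
#   _compute_stderr(toy, num_seed_per_class=2)   # one stderr_kl value per agent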
def compute_normalization(data: score.LeaderboardData,
agent_name: str = 'baseline',
tau: int = 1) -> float:
df = _extract_tau_data(data, tau)
return df[df.agent_name == agent_name]['kl_estimate'].mean()
def compute_ranking(data: score.LeaderboardData,
num_seed_per_class: int = 10,
tau: int = 1,
kl_limit: float = 1e6) -> pd.DataFrame:
"""Compute the ranking based on the average KL divergence."""
# Subsample data to a specific tau
df = _extract_tau_data(data, tau)
if 'baseline:uniform_class_probs' in data.df.agent_name.unique():
normalizing_score = compute_normalization(
data, 'baseline:uniform_class_probs', tau)
else:
print('WARNING: uniform_class_probs agent not included in data, '
'no normalization is applied.')
normalizing_score = 1
# Calculate the mean KL
rank_df = _compute_mean(df, column_name=SCORE_COL)
# Calculate the std error
stderr_df = _compute_stderr(df, num_seed_per_class=num_seed_per_class)
rank_df = pd.merge(rank_df, stderr_df, on='agent_name', how='left')
# Calculate the mean test acc
testacc_df = _compute_mean(df, column_name='test_acc')
rank_df = pd.merge(rank_df, testacc_df, on='agent_name', how='left')
# Calculate the mean training time
traintime_df = _compute_mean(df, column_name='train_seconds')
rank_df = pd.merge(rank_df, traintime_df, on='agent_name', how='left')
# Calculate the mean evaluation time
evaltime_df = _compute_mean(df, column_name='evaluation_seconds')
rank_df = pd.merge(rank_df, evaltime_df, on='agent_name', how='left')
# TODO(author2): Work out what's going wrong with unhashable hypers e.g. list.
for var in data.sweep_vars:
try:
df[var].unique()
except TypeError:
df[var] = df[var].astype(str)
hyper_df = df[data.sweep_vars].drop_duplicates()
df = | pd.merge(rank_df, hyper_df, on='agent_name', how='left') | pandas.merge |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ======================================================================================================================== #
# Project : Explainable Recommendation (XRec) #
# Version : 0.1.0 #
# File : \criteo.py #
# Language : Python 3.8 #
# ------------------------------------------------------------------------------------------------------------------------ #
# Author : <NAME> #
# Email : <EMAIL> #
# URL : https://github.com/john-james-ai/xrec #
# ------------------------------------------------------------------------------------------------------------------------ #
# Created : Sunday, December 26th 2021, 3:56:00 pm #
# Modified : Friday, January 14th 2022, 6:45:52 pm #
# Modifier : <NAME> (<EMAIL>) #
# ------------------------------------------------------------------------------------------------------------------------ #
# License : BSD 3-clause "New" or "Revised" License #
# Copyright: (c) 2021 Bryant St. Labs #
# ======================================================================================================================== #
import os
import random
import pandas as pd
import numpy as np
import logging
import pickle
from datetime import datetime
from cvr.utils.printing import Printer
from cvr.data.datastore import DataStore
from cvr.visuals.visualize import Visual
from cvr.utils.format import titlelize_df, s_to_dict
from cvr.data import (
criteo_columns,
feature_columns,
target_columns,
numeric_columns,
categorical_columns,
)
from cvr.utils.config import DataConfig
from cvr.data import (
numeric_descriptive_stats,
categorical_descriptive_stats,
numeric_columns,
categorical_columns,
)
from cvr.data.outliers import OutlierDetector
# ------------------------------------------------------------------------------------------------------------------------ #
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------------------------------------------------ #
class DataProfiler:
"""Profiles the Criteo Data."""
def __init__(
self,
datastore: DataStore,
        visual: Visual,
        outlier_detector: OutlierDetector = None,
stage: str = "staged",
version: str = "prod",
) -> None:
self._stage = stage
self._version = version
self._datastore = datastore
self._visual = visual
self._outlier_detector = outlier_detector
self._printer = Printer()
self._metadata = CriteoMD(stage=stage, version=version)
self._dataset_name = "Criteo Sponsored Search Conversion Log Dataset"
self._df = None
self._stats = None
self._stats_numeric = None
self._stats_categorical = None
self._stats_summary = None
self._outlier_labels = None
def load(self) -> None:
self._df = self._datastore.read()
self._compute_stats()
def _compute_stats(self) -> None:
"""Computes stats for entire dataset and for conversions."""
# Compute on entire dataset
self._stats = self._metadata.get_statistics(self._df)
self._stats_numeric = self._stats["numeric"]
self._stats_categorical = self._stats["categorical"]
self._stats_summary = self._stats["summary"]
def _load_if_null(self) -> None:
if self._df is None:
self.load()
@property
def summary(self) -> None:
"""Provides overall summary at dataet level."""
self._load_if_null()
self._printer.print_title(self._dataset_name)
d = {}
d["Rows"], d["Columns"] = self._df.shape
d["Complete Samples"] = len(self._get_complete())
d["Size Memory"] = self._df.memory_usage().sum()
self._printer.print_dictionary(d, "Overview")
d = {}
d["Users"] = self._df["user_id"].nunique()
d["Products"] = self._df["product_id"].nunique()
d["Clicks"] = self._df.shape[0]
d["Conversions"] = len(self._df.loc[self._df["sale"] == 1])
d["Conversion Rate"] = round(d["Conversions"] / d["Clicks"] * 100, 2)
self._printer.print_dictionary(d, "Basic Statistics")
d = {}
d["Cells"] = self._stats_summary["cells"]
d["Missing"] = self._stats_summary["missing"]
d["MissingNess"] = self._stats_summary["missingness"]
self._printer.print_dictionary(d, "Missing")
d = {}
d["Numeric Targets"] = self._stats_summary["numeric_targets"]
d["Categorical Targets"] = self._stats_summary["categorical_targets"]
d["Numeric Features"] = self._stats_summary["numeric_features"]
d["Categorical Features"] = self._stats_summary["categorical_features"]
self._printer.print_dictionary(d, "Data Types")
d = {}
d["First Click"] = self._stats_summary["first_click"]
d["Last Click"] = self._stats_summary["last_click"]
d["Period"] = self._stats_summary["period"]
self._printer.print_dictionary(d, "Period")
return self
@property
def info(self) -> None:
"""Prints a summary of each column."""
self._load_if_null()
columns = [
"column",
"datatype",
"count",
"unique",
"uniqueness",
"missing",
"missingness",
"example",
]
dfn = self._stats_numeric[columns]
dfc = self._stats_categorical[columns]
df = | pd.concat([dfn, dfc], axis=0) | pandas.concat |
import time
import datetime
import pandas as pd
from pytrademl.utilities.ticker_utilities import obtain_tickers
from pytrademl.utilities.object_utilities import import_object
from pytrademl.utilities.key_utilities import load_key
from alpha_vantage.timeseries import TimeSeries
from pathlib import Path
def get_data_from_alphaVantage(key, reload=False, ticker_file="ETFTickers", symbol_market="TSX", output_size="full"):
root_dir = Path(__file__).resolve().parent.parent
if reload:
tickers = obtain_tickers(ticker_file)
else:
tickers = import_object((root_dir / ticker_file).with_suffix('.pickle'))
folder_name = root_dir / 'dataframes' / (ticker_file.split("Tickers")[0])
folder_name.mkdir(parents=True, exist_ok=True)
ts = TimeSeries(key=key, output_format='pandas')
# If today is a weekend, go back to the friday
today = datetime.datetime.today()
weekday = today.weekday()
if weekday == 5:
today = datetime.datetime.today() - datetime.timedelta(days=1)
elif weekday == 6:
today = datetime.datetime.today() - datetime.timedelta(days=2)
else:
pass
today = today.strftime("%Y-%m-%d")
totalAPICalls = 0
maxAPICalls = 500 # Max of 500 API requests per day on free license
api_calls_per_minute = 5
sleep_delay = int(60 / api_calls_per_minute) + 2
for ticker in tickers:
# Check if a dataframe for the ticker already exists
ticker_file = (folder_name / ticker[1]).with_suffix('.csv')
if Path(ticker_file).is_file():
df = pd.read_csv(ticker_file)
df.set_index('date', inplace=True)
if today in df.index: # Check if ticker information is up to date
print(ticker[1], "is up to date.")
continue
if totalAPICalls < maxAPICalls:
if symbol_market == "TSX":
tickername = 'TSX:'+ticker[1]
try:
data, _ = ts.get_daily_adjusted(tickername, outputsize=output_size) # full or compact
totalAPICalls = totalAPICalls + 1
print('Retrieved data for:',
tickername, '(', ticker[0], ')')
got_flag = True
except Exception as e:
print("Error retrieving data for", tickername+":", e)
if output_size == "full":
try:
tickername = 'TSX:'+ticker[1]
data, _ = ts.get_daily_adjusted(
tickername, outputsize='compact')
totalAPICalls = totalAPICalls + 1
print('Retrieved compact data for:',
tickername, '(', ticker[0], ')')
got_flag = True
except Exception as e:
print("Error retrieving data for",
tickername+":", e)
got_flag = False
time.sleep(sleep_delay)
else:
time.sleep(sleep_delay)
got_flag = False
else:
try:
data, _ = ts.get_daily_adjusted(
ticker[1], outputsize=output_size)
totalAPICalls = totalAPICalls + 1
print('Retrieved data for:',
ticker[1], '(', ticker[0], ')')
got_flag = True
except Exception as e:
print("Error retrieving data for", ticker[1]+":", e)
time.sleep(sleep_delay)
got_flag = False
if got_flag:
if Path(ticker_file).is_file():
# if os.path.exists('{}/{}.csv'.format(folder_name, ticker[1])):
data.to_csv('temp.csv')
df_new = pd.read_csv('temp.csv', parse_dates=True, index_col=0)
df_old = pd.read_csv(ticker_file, parse_dates=True, index_col=0)
# Merge and drop exact duplicates
df = pd.concat([df_new, df_old]).drop_duplicates()
# Drops duplicates with updated values, keeping the most recent data
df = df.loc[~df.index.duplicated(keep='first')]
else:
data.to_csv(ticker_file)
df = | pd.read_csv(ticker_file, parse_dates=True, index_col=0) | pandas.read_csv |
import pandas as pd
import glob2
import os
def convert2hdf(source_dir, desti_dir=os.getcwd()):
temp = []
for path_name in glob2.iglob(source_dir+'**/*.txt', recursive=True):
_, file_name = os.path.split(path_name)
file_name = file_name.split(".")[0]
file_name = ''.join([i for i in file_name if not i.isdigit()])
file_name = file_name.replace("_","")
data = open(path_name, "r").read()
temp.append([file_name,data])
df = pd.DataFrame(temp, columns=["file_name", "data"])
# print(df)
df.to_hdf("data.h5", key="df")
if __name__ == '__main__':
txtDir = "./data/"
convert2hdf(txtDir)
df = | pd.read_hdf('data.h5', 'df') | pandas.read_hdf |
"""facebook_birthdays.py
Usage:
facebook_birthdays.py -f <file>
Options:
-h, --help Show this help
--version Show the version
-f,--file <file> File to import (har file)
"""
from haralyzer import HarParser, HarPage
from bs4 import BeautifulSoup
from json import loads
import pandas as pd
from pathlib import Path
from sys import exit
import docopt
# goes through the birthday data to grab the name, month, day of your friends birthday
def json_birthday(friend_data):
friend_month = friend_data['viewer']['all_friends_by_birthday_month']['edges']
birthdays = []
for item in friend_month:
month = item['node']['month_name_in_iso8601']
for elm in item['node']['friends']['edges']:
url = elm['node']['url']
name = elm['node']['name']
day = elm['node']['birthdate']['day'] # birthdate = year, day, month
year = elm['node']['birthdate']['year']
birthdays.append({'url': url, 'name': name, 'month': month, 'day': day, 'year': year})
return birthdays
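# Example (not part of the original script): the nested structure json_birthday
# expects, reduced to one made-up friend, and the record it produces.
#   sample = {'viewer': {'all_friends_by_birthday_month': {'edges': [
#       {'node': {'month_name_in_iso8601': 'January',
#                 'friends': {'edges': [
#                     {'node': {'url': 'https://example.com/jane',
#                               'name': 'Jane Doe',
#                               'birthdate': {'day': 5, 'month': 1, 'year': 1990}}}]}}}]}}}
#   json_birthday(sample)
#   # -> [{'url': 'https://example.com/jane', 'name': 'Jane Doe',
#   #      'month': 'January', 'day': 5, 'year': 1990}]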
if __name__ == "__main__":
args = docopt.docopt(__doc__, version="1.0")
in_file = args["--file"]
if 'har' not in in_file.split('.')[-1]:
print("Please use a file with a '.har' extension")
exit(-1)
birthday_data = []
with open(in_file, 'r') as f:
har_parser = HarParser(loads(f.read()))
data = har_parser.har_data
# check if ['content']['mimeType'] == 'text/html' or 'json'
html_data = data['entries'][0]['response']['content']['text']
soup = BeautifulSoup(html_data, 'html.parser')
find_elm = soup.find_all('script')
for elm in find_elm: # first element is inside html data - can convert to json
if elm.string and 'birthdate' in elm.string:
beginining_text = 'ScheduledApplyEach,' # json starts with {"require"
find_text_index = len(beginining_text) + elm.string.find(beginining_text)
last_index = elm.string.rfind(');});});') # get rid of ';'s
json_data = loads(elm.string[find_text_index:last_index])
# cycle through 'today', 'recent', 'upcoming', 'viewer'
friend_data = json_data['require'][3][3][1]['__bbox']['result']['data']
birthday_data += json_birthday(friend_data)
for elm in data['entries'][1:]: # rest of entries are in json style
if 'text' in elm['response']['content']['mimeType']:
json_data = elm['response']['content']['text']
if 'birthdate' in json_data:
temp = loads(json_data)
birthday_data += json_birthday(temp['data'])
# put data into a CSV file + download to downloads folder
title = 'facebook_birthday_data.csv'
facebook_df = | pd.DataFrame(birthday_data) | pandas.DataFrame |
import pandas as pd
import networkx as nx
from sqlalchemy import create_engine
from sys import argv
schema = "theta_plus"
user_name = argv[1]
password = argv[2]
sql_scheme = 'postgresql://' + user_name + ':' + password + '@localhost:5432/ernie'
engine = create_engine(sql_scheme)
cluster_query = """SELECT cluster_no
FROM theta_plus.imm1985_1995_all_merged_mcl
ORDER BY cluster_no;"""
clusters = pd.read_sql(cluster_query, con=engine)
clusters_list = clusters['cluster_no'].astype(int).tolist()
for cluster_num in clusters_list:
citing_cited_query="""
SELECT cslu1.cluster_no AS citing_cluster, ccu.citing, cslu2.cluster_no AS cited_cluster, ccu.cited
FROM theta_plus.imm1985_1995_citing_cited ccu
JOIN (SELECT cslu.*
FROM theta_plus.imm1985_1995_cluster_scp_list_mcl cslu
JOIN theta_plus.imm1985_1995_article_score_unshuffled asu ON asu.scp = cslu.scp
WHERE asu.article_score >= 1) cslu1 ON cslu1.scp = ccu.citing
JOIN (SELECT cslu.*
FROM theta_plus.imm1985_1995_cluster_scp_list_mcl cslu
JOIN theta_plus.imm1985_1995_article_score_unshuffled asu ON asu.scp = cslu.scp
WHERE asu.article_score >= 1) cslu2 ON cslu2.scp = ccu.cited
WHERE cslu1.cluster_no!=cslu2.cluster_no AND cslu1.cluster_no= """ + str(cluster_num) + """ -- all external out-degrees
UNION
SELECT cslu1.cluster_no AS citing_cluster, ccu.citing, cslu2.cluster_no AS cited_cluster, ccu.cited
FROM theta_plus.imm1985_1995_citing_cited ccu
JOIN (SELECT cslu.*
FROM theta_plus.imm1985_1995_cluster_scp_list_mcl cslu
JOIN theta_plus.imm1985_1995_article_score_unshuffled asu ON asu.scp = cslu.scp
WHERE asu.article_score >= 1) cslu1 ON cslu1.scp = ccu.citing
JOIN (SELECT cslu.*
FROM theta_plus.imm1985_1995_cluster_scp_list_mcl cslu
JOIN theta_plus.imm1985_1995_article_score_unshuffled asu ON asu.scp = cslu.scp
WHERE asu.article_score >= 1) cslu2 ON cslu2.scp = ccu.cited
WHERE cslu1.cluster_no!=cslu2.cluster_no AND cslu2.cluster_no= """ + str(cluster_num) + """; -- all external in-degrees"""
cluster_scp_query="""SELECT *
FROM theta_plus.imm1985_1995_cluster_scp_list_mcl
WHERE cluster_no = """ + str(cluster_num) + """;"""
citing_cited = pd.read_sql(citing_cited_query, con=engine)
G = nx.from_pandas_edgelist(citing_cited, 'citing', 'cited', create_using=nx.DiGraph())
N=G.order()
degrees = dict(G.degree())
total_deg = pd.DataFrame.from_dict(degrees, orient='index', columns=['ext_cluster_total_degrees'])
total_deg['scp'] = total_deg.index
total_deg = total_deg.reset_index(drop=True)
indegrees = dict(G.in_degree())
total_in_deg = pd.DataFrame.from_dict(indegrees, orient='index', columns=['ext_cluster_in_degrees'])
total_in_deg['scp'] = total_in_deg.index
total_in_deg = total_in_deg.reset_index(drop=True)
outdegrees = dict(G.out_degree())
total_out_deg = | pd.DataFrame.from_dict(outdegrees, orient='index', columns=['ext_cluster_out_degrees']) | pandas.DataFrame.from_dict |
import pandas as pd
from matplotlib import pyplot as plt
obj_find = pd.read_json('./.results/raw/evaluate-find.json')
obj_reduce = | pd.read_json('./.results/raw/evaluate-reduce.json') | pandas.read_json |
import pandas as pd
import numpy as np
from pathlib import Path
def load(path, dt=False, stats=False):
print("loading data from",path)
dataFrames = {}
dataFrames['gameLogs'] = pd.read_csv(path/'GameLogs.csv', index_col=False)
if dt:
dataFrames['gameLogs']['Date'] = pd.to_datetime(dataFrames['gameLogs']['Date'])
dataFrames['people'] = pd.read_csv(path/'People.csv', index_col=False)
dataFrames['teams'] = pd.read_csv(path/'Teams.csv', index_col=False)
dataFrames['managers'] = pd.read_csv(path/'Managers.csv', index_col=False)
dataFrames['fieldings'] = pd.read_csv(path/'Fielding.csv', index_col=False)
dataFrames['pitchings'] = pd.read_csv(path/'Pitching.csv', index_col=False)
dataFrames['battings'] = pd.read_csv(path/'Batting.csv', index_col=False)
if stats:
dataFrames['stats'] = pd.read_csv(path/'Stats.csv', index_col=False)
print("data loaded")
return dataFrames
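# Usage sketch (not part of the original module); the path is hypothetical and
# must contain the CSV files read above:
#   dataFrames = load(Path('../data/Filtered'), dt=True)
#   dataFrames['gameLogs'].head()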
def save(path, dataFrames, stats=False):
print("Saving data to",path)
dataFrames['gameLogs'].to_csv(path/'GameLogs.csv', index = False)
dataFrames['people'].to_csv(path/'People.csv', index = False)
dataFrames['teams'].to_csv(path/'Teams.csv', index = False)
dataFrames['managers'].to_csv(path/'Managers.csv', index = False)
dataFrames['fieldings'].to_csv(path/'Fielding.csv', index = False)
dataFrames['pitchings'].to_csv(path/'Pitching.csv', index = False)
dataFrames['battings'].to_csv(path/'Batting.csv', index = False)
if stats:
dataFrames['stats'].to_csv(path/'Stats.csv', index = False)
print("Data saved")
def filter(path, saveState=True):
def filterFrame(frame, columns, renames=None):
frame = frame[columns]
if(renames!=None):
frame = frame.rename(columns=renames)
return frame.reset_index(drop=True)
def filterGameLogs(gameLogs, people):
gameLogs['Date'] = pd.to_datetime(gameLogs['Date'], format="%Y%m%d")
gameLogs['Visiting league AL'] = gameLogs['Visiting league']=="AL"
gameLogs['Home league AL'] = gameLogs['Home league']=="AL"
gameLogs = gameLogs[gameLogs['Forfeit information'].isna()]
gameLogs = gameLogs[gameLogs['Protest information'].isna()]
generalColumns = [
'Date','Visiting: Team','Visiting league AL','Home: Team','Home league AL','Visiting: Score','Home: Score']
visitingStatsColumns = [
'Visiting at-bats','Visiting hits','Visiting doubles','Visiting triples','Visiting homeruns','Visiting RBI','Visiting sacrifice hits','Visiting sacrifice flies',
'Visiting hit-by-pitch','Visiting walks','Visiting intentional walks','Visiting strikeouts','Visiting stolen bases','Visiting caught stealing','Visiting grounded into double plays',
'Visiting left on base','Visiting pitchers used','Visiting individual earned runs','Visiting team earned runs','Visiting wild pitches',
'Visiting balks','Visiting putouts','Visiting assists','Visiting errors','Visiting passed balls','Visiting double plays','Visiting triple plays']
homeStatsColumns = [
'Home at-bats','Home hits','Home doubles','Home triples','Home homeruns','Home RBI','Home sacrifice hits','Home sacrifice flies',
'Home hit-by-pitch','Home walks','Home intentional walks','Home strikeouts','Home stolen bases','Home caught stealing','Home grounded into double plays',
'Home left on base','Home pitchers used','Home individual earned runs','Home team earned runs','Home wild pitches',
'Home balks','Home putouts','Home assists','Home errors','Home passed balls','Home double plays','Home triple plays']
visitingIDColumns = [
'Visiting team manager ID','Visiting starting pitcher ID',
'Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID',
'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID',
'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID']
homeIDColumns = [
'Home team manager ID','Home starting pitcher ID',
'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID',
'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID',
'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID']
identifier = people[['playerID','retroID']].drop_duplicates(subset=['retroID']).dropna()
for column in visitingIDColumns+homeIDColumns:
merged = pd.merge(gameLogs[column], identifier, left_on=column, right_on='retroID', how="left")
gameLogs[column] = merged['playerID']
gameLogs = filterFrame(gameLogs, generalColumns+visitingStatsColumns+homeStatsColumns+visitingIDColumns+homeIDColumns)
gameLogs = gameLogs.dropna(subset=generalColumns)
for column in visitingStatsColumns+homeStatsColumns:
gameLogs = gameLogs[(gameLogs[column]>=0) | (gameLogs[column].isna())]
return gameLogs.reset_index(drop=True)
def filterPeople(people):
people['yearID'] = people['birthYear']
people['weight'] = 0.453592*people['weight']
people['height'] = 0.0254*people['height']
people['bats right'] = (people['bats']=="R") | (people['bats']=="B")
people['bats left'] = (people['bats']=="L") | (people['bats']=="B")
people['throws right'] = people['throws']=="R"
people = filterFrame(people, ['yearID','playerID','weight','height','bats right', 'bats left', 'throws right'])
return people.reset_index(drop=True)
def filterTeams(teams):
teams = filterFrame(teams,
['yearID','teamIDretro','divID','Rank','G','W','L','DivWin','LgWin','WSWin','R','AB','H','2B','3B','HR','BB','SO','SB','CS','HBP','SF','RA','ER','ERA','SHO','SV','HA','HRA','BBA','SOA','E','DP','FP'],
{"teamIDretro":"teamID","divID":"Division","G":"Games","W":"Wins","L":"Losses","DivWin":"Division winner","LgWin":"League winner","WSWin":"World series winner","R":"Runs scored","AB":"At bats"
,"H":"Hits by batters","2B":"Doubles","3B":"Triples","HR":"Homeruns","BB":"Walks","SO":"Strikeouts","SB":"Stolen bases","CS":"Cought stealing","HBP":"Batters hit by pitch"
,"SF":"Sacrifice flies","RA":"Opponents runs scored","ER":"Earned runs allowed","ERA":"Earned runs average","SHO":"Shutouts","SV":"Saves","HA":"Hits allowed"
,"HRA":"Homeruns allowed","BBA":"Walks allowed","SOA":"Strikeouts allowed","E":"Errors","DP":"Double plays","FP":"Fielding percentage"})
teams['division C'] = (teams['Division']=="C")
teams['division E'] = (teams['Division']=="E")
teams['division W'] = (teams['Division']=="W")
teams = teams.drop(columns=['Division'])
teams['Division winner'] = (teams['Division winner']=='Y')
teams['League winner'] = (teams['League winner']=='Y')
teams['World series winner']= (teams['World series winner']=='Y')
return teams.reset_index(drop=True)
print("start filtering")
dataFrames = load(path/'Input')
print("filter gameLogs")
dataFrames['gameLogs'] = filterGameLogs(dataFrames['gameLogs'], dataFrames['people'])
print("filter people")
dataFrames['people'] = filterPeople(dataFrames['people'])
print("filter teams")
dataFrames['teams'] = filterTeams(dataFrames['teams'])
print("filter managers")
dataFrames['managers'] = filterFrame(dataFrames['managers'],
['yearID','playerID','G','W','L'],
{"G":"Games","W":"Wins","L":"Losses"})
print("filter fieldings")
dataFrames['fieldings'] = filterFrame(dataFrames['fieldings'],
['yearID','playerID','PO','A','E','DP','PB','WP','SB','CS'],
{"PO":"Putouts","A":"Assists","E":"Error","DP":"Double plays","PB":"Passed Balls","WP":"Wild Pitches","SB":"Opponent Stolen Bases","CS":"Opponents Caught Stealing"})
print("filter pitchings")
dataFrames['pitchings'] = filterFrame(dataFrames['pitchings'],
['yearID','playerID','W','L','G','H','ER','HR','BB','SO','BAOpp','ERA','IBB','WP','HBP','BK','BFP','R','SH','SF','GIDP','SV','SHO'],
{"G":"Games","W":"Wins","L":"Losses","H":"Hits","ER":"Earned Runs","HR":"Homeruns","BB":"Walks","SO":"Strikeouts","BAOpp":"Opponent batting average","ERA":"ERA"
,"IBB":"Intentional walks","WP":"Wild pitches","HBP":"Batters hit by pitch","BK":"Balks","BFP":"Batters faced","R":"Runs allowed","SH":"Batters sacrifices"
,"SF":"Batters sacrifice flies","GIDP":"Grounded into double plays","SV":"Saves","SHO":"Shutouts"})
print("filter battings")
dataFrames['battings'] = filterFrame(dataFrames['battings'],
['yearID','playerID','AB','R','H','2B','3B','HR','RBI','SB','CS','BB','SO','IBB','HBP','SH','SF','GIDP'],
{"AB":"At bats","R":"Runs","H":"Hits","2B":"Doubles","3B":"Triples","HR":"Homeruns","RBI":"Runs batted in","SB":"Stolen bases","CS":"Caught stealing"
,"BB":"Base on balls","SO":"Strikeouts","IBB":"Intentional walks","HBP":"Hit by pitch","SH":"Sacrifice hits","SF":"Sacrifice flies","GIDP":"Grounded into double plays"})
print("data filtered")
if saveState:
save(path/'Filtered', dataFrames)
return dataFrames
def replace(path, dataFrames, default="mean", lastKnownState=True, saveState=True, inpurity=0.5):
def replaceFrame(frame, targets, gameLogs, default, lastKnownState, inpurity):
#define ID column
mID = 'playerID'
for column in frame.columns:
if column=='teamID':
mID = 'teamID'
break
if column=='playerID':
break
#drop inpure columns
nanFrame = frame.isna().sum().reset_index()
nanFrame['inpurity'] = nanFrame[0]/frame.index.size
frame = frame[nanFrame[nanFrame['inpurity']<=inpurity]['index'].tolist()]
        #creating frame containing only useful data
onlyFrame = None
for column in targets:
temp = gameLogs[['Date',column]]
temp['yearID'] = temp['Date'].dt.year-1
temp = temp.rename(columns={column:mID})
onlyFrame = pd.concat([onlyFrame, temp]).drop(columns=['Date']).drop_duplicates().dropna().reset_index(drop=True)
#combining duplicates
aggregators = {}
for column in frame.drop(columns=['yearID',mID]).columns:
if (column.find("average")>-1) or (column.find("percentage")>-1):
aggregators[column] = 'mean'
elif (column.find("winner")>-1) or (column.find("division")>-1) or (column.find("Rank")>-1):
aggregators[column] = 'max'
else:
aggregators[column] = 'sum'
temp = frame[frame.duplicated(keep=False, subset=['yearID',mID])]
temp2 = pd.merge(temp[['yearID',mID]],temp.drop(columns=['yearID',mID]).notna(), left_index=True, right_index=True).groupby(['yearID',mID], as_index=False).sum()
temp = temp.groupby(['yearID',mID], as_index=False).agg(aggregators)
for column in temp.columns:
vec = temp2[column]==0
col = temp[column]
col[vec] = None
temp[column] = col
frame = frame.drop_duplicates(keep=False, subset=['yearID',mID])
frame = pd.concat([frame, temp])
mIDs = np.array(list(dict.fromkeys(frame[mID].unique().tolist()+onlyFrame[mID].unique().tolist())))
years = np.array(list(dict.fromkeys(frame['yearID'].unique().tolist()+onlyFrame['yearID'].unique().tolist())))
fullFrame = pd.DataFrame(np.array(np.meshgrid(years, mIDs)).T.reshape(-1,2), columns=['yearID',mID])
fullFrame['yearID'] = pd.to_numeric(fullFrame['yearID'])
fullFrame = pd.merge(fullFrame, frame, on=['yearID',mID], how="left")
if lastKnownState:
fullFrame = pd.merge(fullFrame[['yearID',mID]], fullFrame.groupby([mID]).ffill().drop(columns=['yearID']), left_index=True, right_index=True)
frame = pd.merge(onlyFrame, fullFrame, on=['yearID',mID], how="left")
nanFrame = frame.isna().sum().reset_index()
nanFrame['inpurity'] = nanFrame[0]/frame.index.size
while (not (nanFrame[nanFrame['inpurity']>inpurity/3])['index'].tolist())==False:
frame = frame[frame[nanFrame.at[nanFrame['inpurity'].idxmax(), 'index']].notna()]
nanFrame = frame.isna().sum().reset_index()
nanFrame['inpurity'] = nanFrame[0]/frame.index.size
if default!=None:
for column in frame.columns:
if frame[column].dtype=="bool":
frame[column].fillna(False)
continue
if default=="mean":
if (frame[column].dtype=="float64") | (frame[column].dtype=="int64"):
frame[column] = frame[column].fillna(frame[column].mean())
elif default=="zero":
if (frame[column].dtype=="float64") | (frame[column].dtype=="int64"):
frame[column] = frame[column].fillna(0)
#nanFrame = frame.isna().sum().reset_index()
#nanFrame['inpurity'] = nanFrame[0]/frame.index.size
#print(nanFrame[nanFrame['inpurity']>0])
return frame.dropna().reset_index(drop=True)
def replaceGameLogs(gameLogs):
return gameLogs.dropna().reset_index(drop=True)
def replacePeople(people, gameLogs, default):
columns = ['Visiting team manager ID','Visiting starting pitcher ID'
,'Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID'
,'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID'
,'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID'
,'Home team manager ID','Home starting pitcher ID'
,'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID'
,'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID'
,'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID']
onlyPeople = None
for column in columns:
temp = gameLogs[[column]]
temp = temp.rename(columns={column:'playerID'})
onlyPeople = | pd.concat([onlyPeople, temp]) | pandas.concat |
import os
import subprocess
from abc import ABCMeta, abstractmethod
from functools import reduce
from pathlib import Path
import numpy as np
import pandas as pd
import prody.atomic.atomgroup
from prody import parsePDB, writePDB
from download_protein import DownloadProtein
from get_gdt import get_gdt_for_target_df
from modeller_modeling import modeller_modeling
from seq import AlignSeq, ReadSeq
from xml2pir import blast_xml
class MakeDataset(metaclass=ABCMeta):
def __init__(self, target_name: str, dataset_name: str, blast_db: str,
resnum_start: int = None, resnum_end: int = None):
"""Abstract modeling class
Args:
target_name (str): target name. PDBID_CHAIN. For example, 1ADB_A.
dataset_name (str): name of the dataset
blast_db (str): path to the blast database of the pdbaa
"""
self.target_name = target_name
self.pdb_id, self.chain = self.target_name.split('_')
self.dataset_name = dataset_name
self.blast_db = blast_db
self.resnum_start = resnum_start
self.resnum_end = resnum_end
out_fasta_dir = Path('../fasta') / self.dataset_name
out_fasta_dir.mkdir(parents=True, exist_ok=True)
self.native_fasta_path = (out_fasta_dir / (self.target_name)).with_suffix('.fasta')
native_pdb_dir = Path('../native_pdb') / self.dataset_name
native_pdb_dir.mkdir(parents=True, exist_ok=True)
self.native_pdb_path = (native_pdb_dir / self.target_name).with_suffix('.pdb')
self.template_pdb_dir = Path('../template_pdb/')
self.xml_dir = Path('../blast_xml') / Path(self.blast_db).parent.name / self.dataset_name
self.xml_dir.mkdir(parents=True, exist_ok=True)
self.xml_path = (self.xml_dir / self.target_name).with_suffix('.xml')
self.xml_csv_path = (self.xml_dir / self.target_name).with_suffix('.csv')
self.out_pir_dir = Path('../pir') / self.dataset_name / self.target_name
self.out_pir_dir.mkdir(parents=True, exist_ok=True)
self.out_pir_dir_nofiltering = self.out_pir_dir / 'nofiltering'
self.out_pir_dir_nofiltering.mkdir(exist_ok=True)
self.out_pir_dir_filtering = self.out_pir_dir / 'filtering'
self.out_pir_dir_filtering.mkdir(exist_ok=True)
self.out_pdb_dir = Path('../pdb') / self.dataset_name / self.target_name
self.out_pdb_dir.mkdir(parents=True, exist_ok=True)
self.out_pdb_dir_nofiltering = self.out_pdb_dir / 'nofiltering'
self.out_pdb_dir_nofiltering.mkdir(exist_ok=True)
self.out_pdb_dir_filtering = self.out_pdb_dir / 'filtering'
self.out_pdb_dir_filtering.mkdir(exist_ok=True)
@abstractmethod
def _get_fasta(self) -> str:
"""Get fasta sequence.
Returns:
str: sequence (not including header)
"""
pass
@staticmethod
def _test_match(fasta_seq: str, mol: prody.atomic.atomgroup.AtomGroup) -> None:
"""test that the fasta sequence matches to the pdb sequence.
Args:
fasta_seq (str): Sequence of the fasta.
mol (prody.atomic.atomgroup.AtomGroup): PDB object read by ProDy.
"""
pdb_seq, pdb_resnum = ReadSeq.mol2seq(mol, insert_gap=False)
fasta_seq_array = np.array(list(fasta_seq))
pdb_seq_array = np.copy(fasta_seq_array)
pdb_seq_array[pdb_resnum - 1] = list(pdb_seq)
num_diff = np.count_nonzero(fasta_seq_array != pdb_seq_array)
num_missing = len(fasta_seq) - len(pdb_seq)
assert num_diff < len(fasta_seq) * 0.05
print('length:', len(fasta_seq))
print('num different residues between pdb and fasta:', num_diff)
print('num missing residues:', num_missing)
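    # Illustration (not part of the original class) of the indexing used above:
    # residues present in the PDB are scattered into a fasta-length array by
    # their 1-based residue numbers, so missing residues keep the fasta letter
    # and only genuine mismatches are counted.
    #   fasta_seq = "MKTAYIAK"
    #   pdb_seq, pdb_resnum = "KTAYAK", np.array([2, 3, 4, 5, 7, 8])
    #   fasta_arr = np.array(list(fasta_seq))
    #   pdb_arr = np.copy(fasta_arr)
    #   pdb_arr[pdb_resnum - 1] = list(pdb_seq)
    #   np.count_nonzero(fasta_arr != pdb_arr)   # -> 0 mismatches here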
def _get_pdb(self) -> None:
"""download the pdb and fix residue numbers.
"""
if not self.native_pdb_path.exists():
tmp_pdb_path = self.pdb_id + '.pdb'
DownloadProtein.download_native_pdb(self.pdb_id, self.chain, tmp_pdb_path)
fasta_seq = ReadSeq.fasta2seq(self.native_fasta_path)
mol = parsePDB(tmp_pdb_path, chain=self.chain)
# if the first residue number is negative, correct the residue numbers
if mol.getResnums()[0] < 0:
resnums = mol.getResnums()
new_resnums = resnums - resnums[0] + 1
mol.setResnums(new_resnums)
new_resnums = DownloadProtein.correct_resnums(mol)
mol.setResnums(new_resnums)
pdb_seq, pdb_resnums = ReadSeq.mol2seq(mol, insert_gap=True)
align_pseq, align_fseq, align_findices, align_pindices = AlignSeq.align_fasta_and_pdb(fasta_seq, pdb_seq)
sel_mol_resnums = pdb_resnums[align_pindices]
sel_mol = mol.select('resnum {}'.format(reduce(lambda a, b: str(a) + ' ' + str(b), sel_mol_resnums)))
assert sel_mol is not None
if self.resnum_start is not None and self.resnum_end is not None:
pdb_resnum_start, pdb_resnum_end = str(sel_mol_resnums[0]), str(sel_mol_resnums[-1])
if pdb_resnum_start != self.resnum_start or pdb_resnum_end != self.resnum_end:
print('The residue number of pdb and the specified number are different')
print('pdb start: {}, specified start: {}, pdb end: {}, specified end:{}'
.format(pdb_resnum_start, self.resnum_start, pdb_resnum_end, self.resnum_end))
fasta_resnum = align_findices + 1
convert_resnum_dict = dict(zip(sel_mol_resnums, fasta_resnum))
new_resnum = [convert_resnum_dict[resnum] for resnum in sel_mol.getResnums()]
sel_mol.setResnums(new_resnum)
self._test_match(fasta_seq, sel_mol)
writePDB(str(self.native_pdb_path), sel_mol)
os.remove(tmp_pdb_path)
def _psiblast_xml(self, iteration: str = '3', evalue_threshold: str = '1.0e-3') -> None:
"""psiblast against pdb.
Args:
iteration (str, optional): the number of the iteration. Defaults to '3'.
"""
cmd = ['psiblast', '-query', self.native_fasta_path, '-db', self.blast_db, '-out', self.xml_path,
'-num_iterations', iteration, '-outfmt', '5', '-evalue', evalue_threshold]
subprocess.run(cmd)
def _xml2pir(self) -> None:
"""convert blast xml to the pir format.
"""
bx = blast_xml(self.xml_path, self.native_fasta_path, self.template_pdb_dir)
nofiltering_template_df = bx.convert_xml_to_pir_nofiltering(self.out_pir_dir_nofiltering)
filtering_template_df = bx.convert_xml_to_pir_filtering(self.out_pir_dir_filtering)
nofiltering_template_df['Method'] = 'nofiltering'
filtering_template_df['Method'] = 'filtering'
concat_df = pd.concat([nofiltering_template_df, filtering_template_df])
concat_df.to_csv(self.xml_csv_path)
def preprocess(self) -> None:
"""Generate fasta, download pdb, psiblast, and generate pir files.
"""
self._get_fasta()
self._get_pdb()
self._psiblast_xml()
self._xml2pir()
def check_standard(self):
df = | pd.read_csv(self.xml_csv_path) | pandas.read_csv |
#!/usr/bin/env python3
import contextlib
import datetime
import glob
import os
import sqlite3
import subprocess
import pandas as pd
import requests
DATA_DIR = "data"
POP_DIR = os.path.join(DATA_DIR, "population")
POP_ZIP = "population.zip"
POP_URL = "http://api.worldbank.org/v2/en/indicator/SP.POP.TOTL?downloadformat=csv"
COVID_DIR = "covid"
COVID_REPO = "https://github.com/CSSEGISandData/COVID-19.git"
COVID_DAILY_REPORTS_DIR = "csse_covid_19_data/csse_covid_19_daily_reports"
COVID_SQLITE = os.path.join(DATA_DIR, "covid.sqlite")
os.makedirs(DATA_DIR, exist_ok=True)
def covid_update():
if os.path.exists(os.path.join(DATA_DIR, COVID_DIR)):
subprocess.call(
["git", "pull", "--rebase", "origin", "master"],
cwd=os.path.join(DATA_DIR, COVID_DIR),
)
else:
subprocess.call(["git", "clone", COVID_REPO, COVID_DIR], cwd=DATA_DIR)
def covid_csv_files():
return glob.glob(
os.path.join(DATA_DIR, COVID_DIR, COVID_DAILY_REPORTS_DIR, "*.csv")
)
def read_covid_csv(csv_file):
df = | pd.read_csv(csv_file) | pandas.read_csv |
import pandas as pd
import pathlib
import os
class BronzeToSilver:
def __init__(self, data_path):
self.data_path = data_path
        self.bronze_path = pathlib.Path.joinpath(self.data_path, "bronze")
        if not self.bronze_path.exists():
            print("Bronze path does not exist! Please create directory")
        self.silver_path = pathlib.Path.joinpath(self.data_path, "silver")
        if not self.silver_path.exists():
            print("Silver path does not exist! Please create directory")
def load_bronze_df(self, filename="ecx_bronze.pkl"):
file_path = pathlib.Path.joinpath(self.bronze_path, filename)
df = | pd.read_pickle(file_path) | pandas.read_pickle |
"""
Implementation of Econometric measures of
connectedness and systemic risk in the finance and
insurance sectors by M.Billio, M.Getmansky,
<NAME>, L.Pelizzon
"""
import pandas as pd
import numpy as np
from arch import arch_model
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from typing import Dict
from itertools import combinations, product
from scipy.stats import ttest_1samp
from scipy.sparse.linalg import eigs
from marketlearn.causality_network.sector_data import SectorPrice
from marketlearn.causality_network.vector_ar.bivar import BiVariateVar
from marketlearn.causality_network.graph import GraphAdMap
# pylint: disable=invalid-name, undefined-loop-variable
class CNet:
"""
Class Implements the granger causal flows in a complicated network
"""
class PreProcess:
"""
Nested Class to pre-process data before program start and create
"sectors" attribute
"""
def __init__(self, start: str = "1999-12-31"):
"""
Constructor used to instantiate class
:param start: (str) start date in format 'YYYY-MM-DD'
"""
self.start = start
self.preprocess_sectors()
self.transformed_data = None
@staticmethod
def _get_sectors() -> pd.DataFrame:
"""
Downloads the sector prices from SectorPrice
            :return: sector prices as loaded by SectorPrice().read()
"""
return SectorPrice().read()
def preprocess_sectors(self):
"""
Preprocesses data by removing any NAs
and create sectors attribute
:return: (None)
"""
# pylint: disable=consider-iterating-dictionary
sec = self._get_sectors()
for k in sec.keys():
sec[k] = sec[k][sec[k].index >= self.start].dropna(axis=1)
# Create the sectors attribute
self.sectors = sec.copy()
# Garbage collection
sec = None
def get(self) -> pd.DataFrame:
"""
Returns the sectors after preprocessing
"""
return self.sectors
# --- main class definition
def __init__(self, start: str):
self.sectors = self.PreProcess(start=start).get()
self.pca = PCA()
self.sc = StandardScaler()
self.lr = None
self.ret1 = None
self.ret2 = None
self.errors = None
self.transformed_data = None
def risk_fraction(self, data: pd.DataFrame, n: int = 3):
"""
Computes the cumulative risk fraction of system
see ref: formula (6) of main paper
:param data: (pd.DataFrame) end of month prices
shape = (n_samples, p_shares)
:param n: (int) Number of principal components (3 by default)
assumes user has chosen the best n
:return: (float)
"""
# Store col names
col_names = list(data)
# Compute log returns
data = np.log(1 + data.pct_change())
data = self.sc.fit_transform(data.dropna())
data = self.pca.fit_transform(data)
self.transformed_data = pd.DataFrame(data, columns=col_names)
# Total risk of system
system_risk = np.sum(self.pca.explained_variance_)
# Risk associated with first n principal components
pca_risk = self.pca.explained_variance_[:n].sum() / system_risk
return pca_risk
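    # Sketch (not from the original module) of the quantity risk_fraction returns:
    # the share of total variance explained by the first n principal components
    # of standardized log returns, shown here on random data purely to illustrate.
    #   rng = np.random.default_rng(0)
    #   fake = pd.DataFrame(100 * np.exp(np.cumsum(
    #       rng.normal(0, 0.01, size=(250, 10)), axis=0)))
    #   rets = np.log(1 + fake.pct_change()).dropna()
    #   pca = PCA().fit(StandardScaler().fit_transform(rets))
    #   pca.explained_variance_[:3].sum() / pca.explained_variance_.sum()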
def is_connected(self, data: pd.DataFrame, n: int = 3, thresh: float = 0.3) -> bool:
"""
Determines the interconnectedness in a system
see ref: formula (6) of main paper
:param data: (pd.DataFrame) end of month prices
:param n: (int) Number of principal components (3 by default)
:param thresh: (int) Prespecified threshold (0.3 by default)
:return: (bool) True if first n principal components
explains more than some fraction thresh of total volatility
"""
return self.risk_fraction(data, n=n) >= thresh
def pcas(
self, data: pd.DataFrame, institution_i: str, n: int = 3, thresh: float = 0.3
) -> pd.Series():
"""
Measures connectedness for each company
or exposure of company to total risk of system
see ref: formula (8)
:param data: (pd.DataFrame) end of month prices
:param institution_i: (str) name of the institution
:param n: (int) Number of principal components (3 by default)
:param thresh: (int) Prespecified threshold (0.3 by default)
:return: (pd.Series) if institution_i is None, return
the connectedness of each company to system as a series
otherwise returns the exposure of institution_i
"""
if not self.is_connected(data, n, thresh):
raise ValueError("system not connected - increase n or thresh")
# Get the variances of each institution
var = self.transformed_data.var()
# Get system variance
system_var = self.transformed_data.cov().sum().sum()
# Get the loadings
loadings = self.pca.components_[:n] ** 2
weights = self.pca.explained_variance_[:n]
result = (weights @ loadings).sum() * var / system_var
return result if institution_i is None else result[institution_i]
def linear_granger_causality(
self, data1: pd.Series(), data2: pd.Series(), alpha: float = 0.05
) -> dict:
"""
Tests if data1 granger causes data2
        :param data1: (pd.Series) price series tested as the cause
        :param data2: (pd.Series) price series tested as the effect
        :param alpha: (float) significance level (0.05 by default)
:return: (dict) containing True, False result of
causality. Key1='x_granger_causes_y', key2='y_granger_causes_x'
"""
# Log prices pt = log(Pt)
logp1 = np.log(data1).values
logp2 = np.log(data2).values
# Log returns rt = pt - pt-1
ret = np.diff(logp1)
ret2 = np.diff(logp2)
# Remove mean from sample prior to garch fit
returns = [None, None]
# g = Garch(mean=False)
idx = 0
for r in [ret, ret2]:
_, pval = ttest_1samp(r, 0)
# Sample mean is not zero
if pval < 0.05:
r -= r.mean()
# g.fit(r)
am = arch_model(100 * r, mean="Zero")
res = am.fit(disp="off")
returns[idx] = r / res.conditional_volatility
idx += 1
# Scaled returns based on garch volatility
ret, ret2 = returns
# Check for cointegration
bivar = BiVariateVar(fit_intercept=True)
self.lr = bivar.lr
coint = bivar.coint_test(logp1, logp2, alpha=alpha)
self.ret1 = ret
self.ret2 = ret2
# Auto select based on lowest bic and fit
bivar.select_order(ret, ret2, coint=coint)
# Check for granger causality
result = bivar.granger_causality_test()
return result
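    # Sketch (not part of the original class) of the volatility-standardisation
    # step above in isolation: fit a zero-mean GARCH to demeaned log returns and
    # divide by the conditional volatility before any causality testing.
    #   r = np.diff(np.log(prices))      # `prices` is a hypothetical 1-d array
    #   r = r - r.mean()
    #   res = arch_model(100 * r, mean="Zero").fit(disp="off")
    #   standardized = r / res.conditional_volatility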
def _create_casual_network(self, data: pd.DataFrame()) -> GraphAdMap:
"""
Creates connections between N financial Institutions
:param data: (pd.DataFrame) end of month prices
:return: (GraphAdMap) graph of adjacency map
containing causality network between institutions
in data
"""
# Create a directed graph
g = GraphAdMap(directed=True)
share_names = iter(list(data))
vertices = iter([g.insert_vertex(v) for v in share_names])
# Create granger causality network
key1 = "x_granger_causes_y"
key2 = "y_granger_causes_x"
for c in combinations(vertices, 2):
# Extract the vertices
u, v = c
# Get the respestive prices
price1 = data[u.get_value()]
price2 = data[v.get_value()]
try:
# Check for linear granger causality
granger_result = self.linear_granger_causality(price1, price2)
if granger_result[key1]:
g.insert_edge(u, v, 1)
if granger_result[key2]:
g.insert_edge(v, u, 1)
            except ValueError as e:
                if self.errors is None:
                    self.errors = []
                print("Error occurred for {}".format(e))
                self.errors.append((u.get_value(), v.get_value()))
return g
def _create_sector_casual_network(
self, sector1: pd.DataFrame(), sector2: | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
'''
First, get the relevant data from dumps/mysql
Sitelinks:
mysql --host analytics-store.eqiad.wmnet wikidatawiki -e "select concat('Q', ips_item_id) as id, ips_site_id as site, replace(ips_site_page, ' ', '_') as title from wb_items_per_site join page on page_title = concat('Q', ips_item_id) where page_namespace = 0 and ips_site_id like '%wiki';" > sitelinks.tsv
Pagecounts:
wget https://dumps.wikimedia.org/other/pagecounts-ez/merged/pagecounts-2017-04-views-ge-5-totals.bz2
bunzip2 pagecounts-2017-04-views-ge-5-totals.bz2
echo "site title pageviews" > pagecounts.ssv
grep -e '^[a-z]*\.z ' --color=no pagecounts-2017-04-views-ge-5-totals | sed 's/\.z /wiki /' >> pagecounts.ssv
'''
pagecounts = pd.read_csv('pagecounts.ssv', sep=' ')
sitelinks = | pd.read_csv('sitelinks.tsv', sep='\t') | pandas.read_csv |
#!/usr/bin/python
"""fit tuning curves to first level results
"""
import matplotlib as mpl
# we do this because sometimes we run this without an X-server, and this backend doesn't need
# one. We set warn=False because the notebook uses a different backend and will spout out a big
# warning to that effect; that's unnecessarily alarming, so we hide it.
mpl.use('svg', warn=False)
import argparse
import warnings
import os
import json
import matplotlib.pyplot as plt
import pandas as pd
from scipy import optimize
import numpy as np
def log_norm_pdf(x, a, mode, sigma):
"""the pdf of the log normal distribution, with a scale factor
"""
# note that mode here is the actual mode, for us, the peak spatial frequency. this differs from
# the 2d version we have, where we we have np.log2(x)+np.log2(p), so that p is the inverse of
# the preferred period, the ivnerse of the mode / the peak spatial frequency.
pdf = a * np.exp(-(np.log2(x)-np.log2(mode))**2/(2*sigma**2))
return pdf
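# Sanity-check example (not in the original script): the curve above peaks at
# `mode`, since the exponent vanishes when log2(x) == log2(mode).
#   x = np.logspace(-5, 5, 10001, base=2)
#   y = log_norm_pdf(x, a=1.0, mode=2.0, sigma=0.5)
#   x[np.argmax(y)]   # -> approximately 2.0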
def get_tuning_curve_xy(a, mode, sigma, x=None, norm=False):
if x is None:
x = np.logspace(-20, 20, 20000, base=2)
y = log_norm_pdf(x, a, mode, sigma)
if norm:
y /= y.max()
return x, y
def get_tuning_curve_xy_from_df(df, x=None, norm=False):
"""given a dataframe with one associated tuning curve, return x and y of that tuning curve
"""
params = {'x': x}
for param, param_label in [('a', 'tuning_curve_amplitude'), ('mode', 'tuning_curve_peak'),
('sigma', 'tuning_curve_sigma')]:
if df[param_label].nunique() > 1:
raise Exception("Only one tuning curve can be described in df \n%s!" % df)
params[param] = df[param_label].unique()[0]
return get_tuning_curve_xy(norm=norm, **params)
def log_norm_describe_full(a, mode, sigma):
"""calculate and return many descriptions of the log normal distribution
returns the bandwidth (in octaves), low and high half max values of log normal
curve, inf_warning, x and y.
inf_warning is a boolean which indicates whether we calculate the variance to be infinite. this
happens when the mode is really small as the result of an overflow and so you should probably
examine this curve to make sure things are okay
x and y are arrays of floats so you can plot the tuning curve over a reasonable range.
"""
mu = np.log(mode) + sigma**2
# we compute this because the std dev is always larger than the bandwidth, so we can use this
# to make sure we grab the right patch of x values
var = (np.exp(sigma**2) - 1) * (np.exp(2*mu + sigma**2))
inf_warning = False
if np.isinf(var):
# if the peak of the curve would be at a *really* low or *really* high value, the variance
# will be infinite (not really, but because of computational issues) and so we need to
# handle it separately. this really shouldn't happen anymore, since I've constrained the
# bounds of the mode
if np.log2(mode) < 0:
x = np.logspace(-300, 100, 100000, base=2)
else:
x = np.logspace(-100, 300, 100000, base=2)
inf_warning = True
else:
xmin, xmax = np.floor(np.log2(mode) - 5*sigma), np.ceil(np.log2(mode) + 5*sigma)
x = np.logspace(xmin, xmax, 1000*(xmax - xmin), base=2)
x, y = get_tuning_curve_xy(a, mode, sigma, x)
half_max_idx = abs(y - (y.max() / 2.)).argsort()
if (not (x[half_max_idx[0]] > mode and x[half_max_idx[1]] < mode) and
not (x[half_max_idx[0]] < mode and x[half_max_idx[1]] > mode)):
print(a, mode, sigma)
raise Exception("Something went wrong with bandwidth calculation! halfmax x values %s and"
" %s must lie on either side of max %s!" % (x[half_max_idx[0]],
x[half_max_idx[1]], mode))
low_half_max = np.min(x[half_max_idx[:2]])
high_half_max = np.max(x[half_max_idx[:2]])
bandwidth = np.log2(high_half_max) - np.log2(low_half_max)
return bandwidth, low_half_max, high_half_max, inf_warning, x, y
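# Worked check (not in the original script): for this Gaussian-in-log2(x) curve
# the half-maximum points sit at log2(mode) +/- sigma*sqrt(2*ln 2), so the
# bandwidth returned above should be close to 2*sigma*sqrt(2*ln 2) octaves
# (about 2.355*sigma), up to the resolution of the x grid.
#   bw, lo, hi, warn, x, y = log_norm_describe_full(a=1.0, mode=4.0, sigma=1.0)
#   bw, 2 * 1.0 * np.sqrt(2 * np.log(2))   # both roughly 2.355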
def create_problems_report(fit_problems, inf_problems, save_path):
"""create html report showing problem cases
"""
plots_save_path = os.path.join(save_path.replace('.html', '') + "_report_plots", "plot%03d.svg")
if not os.path.isdir(os.path.dirname(plots_save_path)):
os.makedirs(os.path.dirname(plots_save_path))
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'report_templates.json')) as f:
report_template_strs = json.load(f)
report_text = report_template_strs['HEAD']
for title, problems in zip(["fitting curves", "finding bandwidths"],
[fit_problems, inf_problems]):
report_text += "<h2>Those cases that had problems %s</h2>" % title
for i, (labels, (x, y), (datax, datay)) in enumerate(problems):
report_text += labels.style.render()
plt.scatter(datax, datay)
plt.semilogx(x, y, basex=2)
plt.savefig(plots_save_path % i)
plt.close()
report_text += report_template_strs['FIGURE'] % (plots_save_path % i)
report_text += report_template_strs['TAIL']
with open(save_path, 'w') as f:
f.write(report_text)
# add bounds to the command line?
def main(df, save_path=None, mode_bounds=(2**(-5), 2**11), ampl_bounds=(0, 10),
sigma_bounds=(0, 10)):
"""fit tuning curve to first level results dataframe
note that your mode_bounds are stimuli dependent. for the log-normal stimuli (for either the
pilot or regular stimuli), the default value above works well. for the constant stimuli,
(2**(-15), 2**8) seems to work
"""
if 'bootstrap_num' in df.columns:
additional_cols = ['bootstrap_num']
else:
additional_cols = []
df = df.rename(columns={'amplitude_estimate_median': 'amplitude_estimate'})
melt_cols = ['varea', 'eccen', 'amplitude_estimate', 'stimulus_superclass',
'freq_space_angle', 'baseline'] + additional_cols
df = df[['freq_space_distance', 'local_sf_magnitude'] + melt_cols]
df = pd.melt(df, melt_cols, var_name='frequency_type', value_name='frequency_value')
gb_columns = ['varea', 'eccen', 'stimulus_superclass', 'frequency_type'] + additional_cols
gb = df.groupby(gb_columns)
tuning_df = []
fit_problems, inf_problems = [], []
for n, g in gb:
# we want a sense of where this is, in order to figure out if it stalled out.
str_labels = ", ".join("%s: %s" % i for i in zip(gb_columns, n))
print("\nCreating tuning curves for: %s" % str_labels)
fit_warning = False
if 'mixtures' in n or 'off-diagonal' in n or 'baseline' in n:
# then these points all have the same frequency and so we can't fit a frequency tuning
# curve to them
continue
values_to_fit = zip(g.frequency_value.values, g.amplitude_estimate.values)
# in python2, zip returned a list. in python3, it returns an iterable. we don't actually
# want to iterate through it here, just index into it, so we convert it to a list
values_to_fit = list(zip(*sorted(values_to_fit, key=lambda pair: pair[0])))
fit_success = False
maxfev = 100000
tol = 1.5e-08
while not fit_success:
try:
mode_guess = np.log(np.mean(values_to_fit[0]))
if mode_guess < mode_bounds[0]:
mode_guess = 1
popt, _ = optimize.curve_fit(log_norm_pdf, values_to_fit[0], values_to_fit[1],
maxfev=maxfev, ftol=tol, xtol=tol,
p0=[1, mode_guess, 1],
# optimize.curve_fit needs to be able to take the
# len(bounds), and zips have no length
bounds=list(zip(ampl_bounds, mode_bounds, sigma_bounds)))
fit_success = True
except RuntimeError:
fit_warning = True
maxfev *= 10
tol /= np.sqrt(10)
# popt contains a, mode, and sigma, in that order
bandwidth, lhm, hhm, inf_warning, x, y = log_norm_describe_full(popt[0], popt[1], popt[2])
tuning_df.append(g.assign(tuning_curve_amplitude=popt[0], tuning_curve_peak=popt[1],
tuning_curve_sigma=popt[2], preferred_period=1./popt[1],
tuning_curve_bandwidth=bandwidth, high_half_max=hhm, low_half_max=lhm,
fit_warning=fit_warning, inf_warning=inf_warning, tol=tol, maxfev=maxfev,
mode_bound_lower=mode_bounds[0], mode_bound_upper=mode_bounds[1]))
warning_cols = gb_columns + ['tol', 'maxfev', 'tuning_curve_amplitude',
'tuning_curve_sigma', 'tuning_curve_peak',
'tuning_curve_bandwidth']
if fit_warning:
warnings.warn('Fit not great for:\n%s' % str_labels.replace(', ', '\n'))
            # tuple layout follows create_problems_report's unpacking (reconstructed)
            fit_problems.append((pd.DataFrame(tuning_df[-1][warning_cols].iloc[0]),
                                 (x, y), values_to_fit))
"""
oil price data source: https://www.ppac.gov.in/WriteReadData/userfiles/file/PP_9_a_DailyPriceMSHSD_Metro.pdf
"""
import pandas as pd
import numpy as np
import tabula
import requests
import plotly.express as px
import plotly.graph_objects as go
import time
from pandas.tseries.offsets import MonthEnd
import re
import xmltodict
def process_table(table_df):
print("processing the downloaded PDF from PPAC website.")
cols = ['Date', 'Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol',
'Date_D', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']
table_df.columns = cols
table_df.drop(table_df.index[[0,3]],inplace=True)
table_df.drop('Date_D',axis=1,inplace=True)
table_df.dropna(how='any',inplace=True)
table_df = table_df.astype(str)
table_df = table_df.apply(lambda x: x.str.replace(" ", ""))
table_df[['Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']] = table_df[['Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']].astype(float)
table_df['Date'] = pd.to_datetime(table_df['Date'])
table_petrol = table_df[['Date','Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol','Kolkata_Petrol']]
table_diesel = table_df[['Date','Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']]
new_cols = [i.replace("_Petrol", "") for i in list(table_petrol.columns)]
table_petrol.columns = new_cols
table_diesel.columns = new_cols
return table_petrol, table_diesel
def get_international_exchange_rates(start_date,end_date):
print("sending request for international exchange rates.")
exchange_dates_url = "https://api.exchangeratesapi.io/history?"
params = {"start_at": start_date, "end_at":end_date, "base":"USD", "symbols":"INR"}
try:
req = requests.get(exchange_dates_url,params=params)
except Exception as e:
print(e)
print("request failed. using the saved data.")
dollar_exchange_rates = pd.read_csv("dollar_exhange_rates.csv")
        dollar_exchange_rates['Date'] = pd.to_datetime(dollar_exchange_rates['Date'])
        dollar_exchange_rates = dollar_exchange_rates.set_index('Date').sort_index(ascending=False)
return dollar_exchange_rates
else:
print("request successful. processing the data.")
dollar_exchange_rates = pd.DataFrame(req.json()['rates']).T.reset_index()
dollar_exchange_rates['index'] = pd.to_datetime(dollar_exchange_rates['index'])
        dollar_exchange_rates = dollar_exchange_rates.set_index('index').sort_index(ascending=False)
dollar_exchange_rates.to_csv("dollar_exhange_rates.csv")
return dollar_exchange_rates
# def merge_data(dollar_exchange_rates, international_oil_prices, oil_price_data):
# print("merging the international oil price data, international exchange rate data and domestic oil price data.")
# trim_int = international_oil_prices.loc[international_oil_prices.index.isin(oil_price_data.index)].dropna()
# oil_price_data = oil_price_data.merge(trim_int, left_index=True, right_index=True).sort_index(ascending=False)
# oil_price_data = oil_price_data.merge(dollar_exchange_rates, left_index=True, right_index=True).sort_index(ascending=False)
# oil_price_data['INR'] = oil_price_data['INR'].round(2)
# oil_price_data['INR_pc'] = (((oil_price_data['INR'] - oil_price_data['INR'].iloc[-1])/oil_price_data['INR'].iloc[-1])*100).round(2)
# oil_price_data['rup_lit_crude'] = (oil_price_data['Price'] / 159) * oil_price_data['INR']
# oil_price_data['int_pc'] = (((oil_price_data['Price'] - oil_price_data['Price'].iloc[-1])/oil_price_data['Price'].iloc[-1])*100).round(2)
# oil_price_data['rup_lit_crude_pc'] = (((oil_price_data['rup_lit_crude'] - oil_price_data['rup_lit_crude'].iloc[-1])/oil_price_data['rup_lit_crude'].iloc[-1])*100).round(2)
# return oil_price_data
def download_ppac():
print("sending request for domestic oil price data from PPAC website.")
ppac_url = r"https://www.ppac.gov.in/WriteReadData/userfiles/file/PP_9_a_DailyPriceMSHSD_Metro.pdf"
try:
req = requests.get(ppac_url)
except Exception as e:
print(e)
print("Request unsuccessful. The saved file will be used.")
else:
with open('DATA/price_data.pdf', 'wb') as file:
file.write(req.content)
print('file saved successfully.')
def prepare_downloaded_file():
print("preparing downloaded file for analysis.")
oil_prices = 'DATA/price_data.pdf'
tables = tabula.read_pdf(oil_prices, pages="all")
proc_dfs = [process_table(i) for i in tables]
petrol_df = pd.concat(i[0] for i in proc_dfs)
diesel_df = pd.concat(i[1] for i in proc_dfs)
print(f"Success. Length of Petrol prices {len(petrol_df)}------ diesel prices {len(diesel_df)}")
petrol_df['mean_price'] = (petrol_df['Delhi']+petrol_df['Mumbai']+petrol_df['Chennai']+petrol_df['Kolkata'])/4
diesel_df['mean_price'] = (diesel_df['Delhi']+diesel_df['Mumbai']+diesel_df['Chennai']+diesel_df['Kolkata'])/4
print("Adding percent change columns")
for i in petrol_df.columns[1:]:
petrol_df[f'{i}_pc'] = (((petrol_df[i] - petrol_df[i].iloc[-1])/petrol_df[i].iloc[-1]) * 100).round(2)
for i in diesel_df.columns[1:]:
diesel_df[f'{i}_pc'] = (((diesel_df[i] - diesel_df[i].iloc[-1])/diesel_df[i].iloc[-1]) * 100).round(2)
petrol_df.set_index("Date",inplace=True)
diesel_df.set_index("Date",inplace=True)
return petrol_df, diesel_df
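# Illustrative driver (added; the original script wires these steps up elsewhere):
# fetch the PPAC PDF, then parse it into petrol/diesel price frames.
def _refresh_domestic_prices():
    download_ppac()
    petrol_df, diesel_df = prepare_downloaded_file()
    return petrol_df, diesel_df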
def prep_consumption_df(consumption_df,year):
consumption_df.reset_index(inplace=True)
consumption_df.dropna(how='any',inplace=True)
consumption_df.drop('index',axis=1,inplace=True)
#print(consumption_df)
cols = ['products', 'April','May','June','July','August','September','October','November','December','January','February','March','Total']
consumption_df.drop(consumption_df.index[0],inplace=True)
consumption_df.columns = cols
consumption_df = consumption_df.loc[(consumption_df['products']=='MS')|(consumption_df['products']=='HSD')].reset_index().drop(['index','Total'],axis=1)
    melt_df = pd.melt(consumption_df, id_vars='products', var_name='month', value_name='average_cons')
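    # assumed continuation: tag the melted frame with its fiscal year and return it
    melt_df['year'] = year
    return melt_df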
import logging
import numpy as np
import pandas as pd
import re
from os import PathLike
from pathlib import Path
from scipy.ndimage import maximum_filter
from typing import (
Generator,
List,
Optional,
Sequence,
Tuple,
Union,
)
from steinbock import io
try:
from readimc import MCDFile, TXTFile
from readimc.data import Acquisition, AcquisitionBase
imc_available = True
except ImportError:
imc_available = False
_logger = logging.getLogger(__name__)
def list_mcd_files(mcd_dir: Union[str, PathLike]) -> List[Path]:
return sorted(Path(mcd_dir).rglob("*.mcd"))
def list_txt_files(txt_dir: Union[str, PathLike]) -> List[Path]:
return sorted(Path(txt_dir).rglob("*.txt"))
def create_panel_from_imc_panel(
imc_panel_file: Union[str, PathLike],
imc_panel_channel_col: str = "Metal Tag",
imc_panel_name_col: str = "Target",
imc_panel_keep_col: str = "full",
imc_panel_ilastik_col: str = "ilastik",
) -> pd.DataFrame:
imc_panel = pd.read_csv(
imc_panel_file,
sep=",|;",
dtype={
imc_panel_channel_col: pd.StringDtype(),
imc_panel_name_col: pd.StringDtype(),
imc_panel_keep_col: pd.BooleanDtype(),
imc_panel_ilastik_col: pd.BooleanDtype(),
},
engine="python",
true_values=["1"],
false_values=["0"],
)
for required_col in (imc_panel_channel_col, imc_panel_name_col):
if required_col not in imc_panel:
raise ValueError(f"Missing '{required_col}' column in IMC panel")
for notnan_col in (
imc_panel_channel_col,
imc_panel_keep_col,
imc_panel_ilastik_col,
):
if notnan_col in imc_panel and imc_panel[notnan_col].isna().any():
raise ValueError(f"Missing values for '{notnan_col}' in IMC panel")
rename_columns = {
imc_panel_channel_col: "channel",
imc_panel_name_col: "name",
imc_panel_keep_col: "keep",
imc_panel_ilastik_col: "ilastik",
}
drop_columns = [
panel_col
for imc_panel_col, panel_col in rename_columns.items()
if panel_col in imc_panel.columns and panel_col != imc_panel_col
]
panel = imc_panel.drop(columns=drop_columns).rename(columns=rename_columns)
for _, g in panel.groupby("channel"):
panel.loc[g.index, "name"] = " / ".join(g["name"].dropna().unique())
if "keep" in panel:
panel.loc[g.index, "keep"] = g["keep"].any()
if "ilastik" in panel:
panel.loc[g.index, "ilastik"] = g["ilastik"].any()
panel = panel.groupby(panel["channel"].values).aggregate("first")
panel = _clean_panel(panel) # ilastik column may be nullable uint8 now
ilastik_mask = panel["ilastik"].fillna(False).astype(bool)
panel["ilastik"] = pd.Series(dtype=pd.UInt8Dtype())
panel.loc[ilastik_mask, "ilastik"] = range(1, ilastik_mask.sum() + 1)
return panel
def create_panel_from_mcd_files(
mcd_files: Sequence[Union[str, PathLike]]
) -> pd.DataFrame:
panels = []
for mcd_file in mcd_files:
with MCDFile(mcd_file) as f:
for slide in f.slides:
for acquisition in slide.acquisitions:
panel = _create_panel_from_acquisition(acquisition)
panels.append(panel)
panel = pd.concat(panels, ignore_index=True, copy=False)
return _clean_panel(panel)
def create_panel_from_txt_files(
txt_files: Sequence[Union[str, PathLike]]
) -> pd.DataFrame:
panels = []
for txt_file in txt_files:
with TXTFile(txt_file) as f:
panel = _create_panel_from_acquisition(f)
panels.append(panel)
panel = pd.concat(panels, ignore_index=True, copy=False)
return _clean_panel(panel)
def filter_hot_pixels(img: np.ndarray, thres: float) -> np.ndarray:
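    # 3x3 neighbourhood in the spatial plane (channels are not mixed); the centre pixel
    # is cleared below so each pixel is compared against the maximum of its 8 neighbours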
kernel = np.ones((1, 3, 3), dtype=bool)
kernel[0, 1, 1] = False
max_neighbor_img = maximum_filter(img, footprint=kernel, mode="mirror")
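    # pixels exceeding their neighbourhood maximum by more than `thres` are treated as
    # hot pixels and replaced with that neighbourhood maximum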
return np.where(img - max_neighbor_img > thres, max_neighbor_img, img)
def preprocess_image(
img: np.ndarray, hpf: Optional[float] = None
) -> np.ndarray:
img = img.astype(np.float32)
if hpf is not None:
img = filter_hot_pixels(img, hpf)
return io._to_dtype(img, io.img_dtype)
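# Example (added): clean a raw (channels, y, x) stack with a hot-pixel threshold of 50.
#   raw = np.random.rand(3, 64, 64).astype(np.float32)
#   img = preprocess_image(raw, hpf=50.0)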
def try_preprocess_images_from_disk(
mcd_files: Sequence[Union[str, PathLike]],
txt_files: Sequence[Union[str, PathLike]],
channel_names: Optional[Sequence[str]] = None,
hpf: Optional[float] = None,
) -> Generator[
Tuple[Path, Optional["Acquisition"], np.ndarray, Optional[Path], bool],
None,
None,
]:
unmatched_txt_files = list(txt_files)
for mcd_file in mcd_files:
try:
with MCDFile(mcd_file) as f_mcd:
for slide in f_mcd.slides:
for acquisition in slide.acquisitions:
matched_txt_file = _match_txt_file(
mcd_file, acquisition, unmatched_txt_files
)
if matched_txt_file is not None:
unmatched_txt_files.remove(matched_txt_file)
channel_ind = None
if channel_names is not None:
channel_ind = _get_channel_indices(
acquisition, channel_names
)
if isinstance(channel_ind, str):
_logger.warning(
f"Channel {channel_ind} not found for "
f"acquisition {acquisition.id} in file "
"{mcd_file}; skipping acquisition"
)
continue
img = None
recovered = False
try:
img = f_mcd.read_acquisition(acquisition)
except IOError:
_logger.warning(
f"Error reading acquisition {acquisition.id} "
f"from file {mcd_file}"
)
if matched_txt_file is not None:
_logger.warning(
f"Restoring from file {matched_txt_file}"
)
try:
with TXTFile(matched_txt_file) as f_txt:
img = f_txt.read_acquisition()
if channel_names is not None:
channel_ind = _get_channel_indices(
f_txt, channel_names
)
if isinstance(channel_ind, str):
_logger.warning(
f"Channel {channel_ind} "
"not found in file "
f"{matched_txt_file}; "
"skipping acquisition"
)
continue
recovered = True
except IOError:
_logger.exception(
"Error reading file "
f"{matched_txt_file}"
)
                        if img is not None:  # skip acquisitions whose image could not be read
if channel_ind is not None:
img = img[channel_ind, :, :]
img = preprocess_image(img, hpf=hpf)
yield (
Path(mcd_file),
acquisition,
img,
Path(matched_txt_file)
if matched_txt_file is not None
else None,
recovered,
)
del img
except:
_logger.exception(f"Error reading file {mcd_file}")
while len(unmatched_txt_files) > 0:
txt_file = unmatched_txt_files.pop(0)
try:
channel_ind = None
with TXTFile(txt_file) as f:
if channel_names is not None:
channel_ind = _get_channel_indices(f, channel_names)
if isinstance(channel_ind, str):
_logger.warning(
f"Channel {channel_ind} not found in file "
f"{txt_file}; skipping acquisition"
)
continue
img = f.read_acquisition()
if channel_ind is not None:
img = img[channel_ind, :, :]
img = preprocess_image(img, hpf=hpf)
yield Path(txt_file), None, img, None, False
del img
except:
_logger.exception(f"Error reading file {txt_file}")
def _create_panel_from_acquisition(
acquisition: "AcquisitionBase",
) -> pd.DataFrame:
panel = pd.DataFrame(
data={
"channel": acquisition.channel_names,
"name": acquisition.channel_labels,
"keep": True,
"ilastik": range(1, acquisition.num_channels + 1),
"deepcell": np.nan,
},
)
panel["channel"] = panel["channel"].astype(pd.StringDtype())
panel["name"] = panel["name"].astype(pd.StringDtype())
panel["keep"] = panel["keep"].astype(pd.BooleanDtype())
panel["ilastik"] = panel["ilastik"].astype(pd.UInt8Dtype())
panel["deepcell"] = panel["deepcell"].astype(pd.UInt8Dtype())
panel.sort_values(
"channel",
key=lambda s: pd.to_numeric(s.str.replace("[^0-9]", "", regex=True)),
inplace=True,
)
return panel
def _clean_panel(panel: pd.DataFrame) -> pd.DataFrame:
panel.sort_values(
"channel",
key=lambda s: pd.to_numeric(s.str.replace("[^0-9]", "", regex=True)),
inplace=True,
)
name_dupl_mask = panel["name"].duplicated(keep=False)
name_suffixes = panel.groupby("name").cumcount().map(lambda i: f" {i + 1}")
panel.loc[name_dupl_mask, "name"] += name_suffixes[name_dupl_mask]
if "keep" not in panel:
panel["keep"] = pd.Series(True, dtype=pd.BooleanDtype())
if "ilastik" not in panel:
panel["ilastik"] = pd.Series(dtype=pd.UInt8Dtype())
panel.loc[panel["keep"], "ilastik"] = range(1, panel["keep"].sum() + 1)
if "deepcell" not in panel:
panel["deepcell"] = pd.Series(dtype= | pd.UInt8Dtype() | pandas.UInt8Dtype |
# pylint: disable=C0103,C0301,E0401
"""Scrape the FAA identifier and other info from IEM and AirNav
based on IEM ASOS identifier
Writes a complete file of station info as "airport_meta.csv"
Notes:
This method seems more straightforward than downloading data from FAA,
could not find FAA resource making use of ICAO identifiers.
"""
import argparse
import time
import pandas as pd
import requests
def scrape_asos_meta():
"""Get the meta data for ASOS stations provided by IEM
Returns:
pandas.DataFrame of station meta data on AK_ASOS network
"""
uri = "https://mesonet.agron.iastate.edu/sites/networks.php?network=AK_ASOS&format=csv&nohtml=on"
r = requests.get(uri)
meta_list = [t.split(",") for t in r.content.decode()[:-2].split("\n")]
    meta = pd.DataFrame(meta_list)
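    # assumed completion: promote IEM's first CSV row to the header and return the frame
    meta.columns = meta.iloc[0]
    meta = meta.drop(0).reset_index(drop=True)
    return meta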
import requests
import json
import pandas as pd
import numpy as np
from pandas.io.json import json_normalize
import datetime
import plotly.graph_objects as go
payload = {'format': 'json', 'per_page': '500', 'date':'1990:2015'}
q = requests.get('https://api.covid19api.com/dayone/country/Canada', params=payload)
canada1 = json.loads(q.text)
canada2 = json_normalize(canada1)
canada2 = canada2.replace(np.nan,0)
canada3 = canada2.groupby(['Date','Province'])[['Confirmed','Deaths',"Recovered",'Active']].mean()
canada3 = canada3.reset_index()
canada3 = canada3[canada3['Province']!='']
canada3['Date'] = pd.to_datetime(canada3['Date'])
canada4 = canada3.copy()
canada4['day'] = canada4['Date'].dt.day_name()
canada4 = canada4[canada4['Date']=='2020-05-04']
# Use this file to read in your data and prepare the plotly visualizations. The paths to the data files are in
# `data/file_name.csv`
def return_figures():
"""Creates four plotly visualizations
Args:
None
Returns:
        list (dict): list containing the plotly visualizations
"""
canada5 = canada2[canada2['Province']=='']
canada5['Date'] = pd.to_datetime(canada5['Date'] )
canada5 = canada5[canada5['Date']=='2020-05-04']
conf = canada5['Confirmed'].values
conf2 = canada5['Active'].values
conf3 = canada5['Deaths'].values
graph_one = []
import plotly.graph_objects as go
graph_one = go.Figure()
graph_one.add_trace(
go.Indicator(
value = conf[0],
delta = {'reference': 160},
gauge = {'axis': {'visible': False}},
title = {'text': "Confirmed"},
domain = {'row': 0, 'column': 0}))
graph_one.add_trace(
go.Indicator(
mode = "number",
value = conf2[0],
title = {'text': "Active"},
domain = {'row': 1, 'column': 0}))
graph_one.add_trace(go.Indicator(
mode = "delta",
value = -conf3[0],
title = {'text': "Deaths"},
domain = {'row': 2, 'column': 0}))
graph_one.update_layout(
title=("Overview"),
grid = {'rows': 3, 'columns': 3, 'pattern': "independent"},
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': 90}}]
}})
layout_one = dict(title = 'Current State of Coronavirus in Canada'
)
layout_one = dict(
xaxis = dict(title = 'Date',),
yaxis = dict(title = 'Cases'),
)
graph_two = []
graph_two.append(
go.Bar(name='Deaths', x=canada3['Date'], y=canada3['Deaths'])
)
layout_two = dict(title = 'Number of Deaths per Day',
xaxis = dict(title = 'Date',),
yaxis = dict(title = 'Cases'),
)
graph_three = []
from plotly import tools
trace1 = go.Bar(name='Confirmed', x=canada3['Date'], y=canada3['Confirmed'])
trace2 = go.Bar(name='Active', x=canada3['Date'], y=canada3['Active'])
graph_three = tools.make_subplots(rows=1, cols=1, shared_xaxes=True)
graph_three.append_trace(trace2, 1,1)
graph_three.append_trace(trace1, 1, 1)
graph_three.update_yaxes(title_text="Cases", row=1, col=1)
graph_three.update_xaxes(title_text="Date", row=1, col=1)
graph_three['layout'].update(title='Number of Confirmed vs Active Cases per Day')
layout_three = dict(
xaxis = dict(title = 'Date', ),
yaxis = dict(title = 'Cases'),
)
graph_four=[]
import plotly.graph_objects as go
graph_four = go.Figure()
graph_four.add_trace(
go.Scatter(
x=canada4['Province'],
y=canada4['Deaths'],
name="Deaths"
))
graph_four.add_trace(
go.Bar(
x=canada4['Province'],
y=canada4['Confirmed'],
name="Confirmed"
))
graph_four.update_layout(title_text='Confirmed vs Death Cases per Province')
graph_four.update_xaxes(title_text='Province')
graph_four.update_yaxes(title_text='Cases')
layout_four = dict(
xaxis = dict(title = 'Province', ),
yaxis = dict(title = 'Cases'),
)
graph_five=[]
canada6 = canada4[['Province','Confirmed','Deaths','Active']]
graph_five = go.Figure(data=[go.Table(
header=dict(values=list(canada6.columns),
fill_color='paleturquoise',
align='left'),
cells=dict(values=[canada6.Province, canada6.Confirmed, canada6.Deaths, canada6.Active],
fill_color='lavender',
align='left'))])
graph_five.update_layout(title_text='Confirmed, Deaths and Active Cases Per Province')
layout_five = dict(
xaxis = dict(title = 'Province', ),
yaxis = dict(title = 'Cases'),
)
graph_six=[]
canada6 = canada3[['Date','Confirmed','Deaths','Active']]
    canada6['Date'] = pd.to_datetime(canada6['Date'])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Figure 2A
VCs=[]
with open('GPD_VCs.txt') as inFile:
for line in inFile:
toks=line.strip().split('\t')
if len(toks)>1: # No singletons
VCs.append(toks)
scaff_to_gca={}
with open('gca_to_scaf.txt') as inFile:
for line in inFile:
scaff_to_gca[line.split()[1].strip()]=line.split()[0]
gca_to_scaff={}
for k in list(scaff_to_gca.keys()):
gca_to_scaff[scaff_to_gca[k]]=k
all_uvs_assem={}
with open('WG_crispr_targets.txt') as inFile:
for line in inFile:
try:
all_uvs_assem[line.split()[0]].append(line.strip().split()[1])
        except KeyError:
all_uvs_assem[line.split()[0]]=[line.strip().split()[1]]
assem_to_fam={}
assem_to_order={}
assem_to_class={}
assem_to_phyla={}
assem_to_genus={}
assem_to_spp={}
fam_to_phyla={}
order_to_phyla={}
class_to_phyla={}
genus_to_phyla={}
genus_to_fam={}
with open('hgg_bgi_taxonomy.tab') as inFile:
for line in inFile:
assem=line.split('\t')[0]
if len(assem.split('_'))==3:
assem=assem.split('_')[0]+'_'+assem.split('_')[1]+'#'+assem.split('_')[2]
elif 'scaffold' in assem:
assem=scaff_to_gca[assem]
fam=line.split('\t')[5]
phyla=line.split('\t')[2]
order=line.split('\t')[4]
genus=line.split('\t')[-2]
classB=line.split('\t')[3]
spp=line.split('\t')[-1].strip()
if 'Firmicutes' in phyla:
phyla='Firmicutes'
assem_to_fam[assem]=fam
assem_to_order[assem]=order
assem_to_class[assem]=classB
assem_to_phyla[assem]=phyla
assem_to_genus[assem]=genus
assem_to_spp[assem]=spp
fam_to_phyla[fam]=phyla
order_to_phyla[order]=phyla
class_to_phyla[classB]=phyla
genus_to_phyla[genus]=phyla
genus_to_fam[genus]=fam
# Counting number of assemblies assigned to each VC
vc_to_assem={}
for vc_idx in range(len(VCs)):
vc_to_assem[vc_idx]=[]
for uv in VCs[vc_idx]:
assem=all_uvs_assem.get(uv)
if assem!=None:
for assem_i in assem:
vc_to_assem[vc_idx].append(assem_i)
for k in vc_to_assem.keys():
vc_to_assem[k]=list(set(vc_to_assem[k]))
# Number of VCs / number of isolates (genus)
genus_to_iso={}
genus_set=list(set(assem_to_genus.values()))
for k in genus_set:
genus_to_iso[k]=0
for k in genus_set:
with open('hgg_bgi_taxonomy.tab') as inFile:
for line in inFile:
if line.split('\t')[-2]==k:
genus_to_iso[k]+=1
# Viral diversity across bacterial clades (genera)
gen_vD={}
gen_set=[]
for i in list(set(genus_to_iso.keys())):
if i!='NA':
gen_set.append(i)
gen_set=list(set(gen_set))
for f in gen_set:
gen_vD[f]=0
# Counting number of assemblies per genus
for my_gen in gen_set:
for vc in vc_to_assem.keys():
n=0
# For each assembly associated to each VC
for assem in vc_to_assem[vc]:
gen_i=assem_to_genus.get(assem)
if gen_i==my_gen:
n+=1
gen_vD[my_gen]+=n
gen_vD[my_gen]=gen_vD[my_gen]/genus_to_iso[my_gen]
# Only picking families with at least 10 members
gen_vD_n10={}
for g in gen_vD.keys():
if genus_to_iso[g]>=10:
gen_vD_n10[g]=gen_vD[g]
# Adding phylum
phylum=[]
for g in gen_vD_n10.keys():
phylum.append(genus_to_phyla[g])
genus_dict = pd.DataFrame()
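# Assumed continuation: assemble the per-genus table (diversity + phylum) used for plotting
genus_dict['genus'] = list(gen_vD_n10.keys())
genus_dict['VCs_per_isolate'] = list(gen_vD_n10.values())
genus_dict['phylum'] = phylum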
import pickle
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from skmultilearn.problem_transform import ClassifierChain
from utils.preprocessing import clean_text
def list2string(list):
return ','.join(map(str, list))
file_tweets = "new_personality_combined.csv"
file_personalities = "personality-data.txt"
data_tweets = pd.read_csv(file_tweets, sep=",", encoding="utf8", index_col=0)
data_personalities = pd.read_csv(file_personalities, sep="\t", encoding="utf8", index_col=10)
print(data_tweets)
# Join the two dataframes together
merged_df = pd.merge(data_tweets, data_personalities, on='twitter_uid', how='inner')
merged_df.reset_index(drop=True, inplace=True)
# Drop the statues (the text)
personality_categories = list(merged_df.columns.values)[2:]
# Print dataset statistics
print("Final number of data in personality dataset =", merged_df.shape[0])
print("Number of personality categories =", len(personality_categories))
print("Personality categories =", ', '.join(personality_categories))
print(merged_df['statuses'])
merged_df['statuses'] = merged_df.statuses.apply(clean_text)
print(merged_df['statuses'])
merged_df['statuses'] = [list2string(tokens) for tokens in merged_df['statuses']]
# Split the personality scores into 3 equal-width bins (pd.cut) to convert the problem to classification
bins = 3
labels = [0, 1, 2]
merged_df['ADMIRATION'] = pd.cut(merged_df['ADMIRATION'], bins, labels=labels)
merged_df['AGRE'] = pd.cut(merged_df['AGRE'], bins, labels=labels)
merged_df['ANXIETY'] = pd.cut(merged_df['ANXIETY'], bins, labels=labels)
merged_df['AVOIDANCE'] = pd.cut(merged_df['AVOIDANCE'], bins, labels=labels)