prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
---|---|---|
"""
glucoseDataFrame.py
Creates a dataframe of glucose-related statistics
in diabetics for predictive analysis.
"""
import sys
import os
import math
from datetime import *
from dateutil.parser import parse
import pandas as pd
import numpy as np
sys.path.append("..") # proper file path for importing local modules
from pythonScripts.jsonToCsv import convertToCsv
#-------CONSTANTS-------------
CONVERSION_FACTOR = 18.01559
#-------Dicts----------
#basal rates (unit/hour)
BASAL = {
"0" : .625,
"2" : .650, #if hour equals 2, then also minute = 30 cause (2:30)
"4" : .800,
"8" : .725,
"12" : .700,
"14" : .250,
"19" : .650
}
#insulin sensitivity (mg/dL/unit)
SENSITIVITY = {
"0" : 60,
"6" : 70,
"9" : 60,
"12" : 60,
"15" : 60
}
#carb ratio (grams/unit)
CARB_RATIO = {
"0" : 10,
"6" : 5,
"11" : 5.5, #if hour equals 11, then also minute = 30 cause (11:30)
"14" : 6,
"18" : 7,
"21" : 9
}
#----------------------
#-----------------------------
def convert_glucose(glucose_levels):
"""Do conversion across entire dataset
conversion mmol/L to mg/dL"""
value_row = glucose_levels.loc[:, 'value']
convert_row = value_row.mul(CONVERSION_FACTOR)
round_conversion = convert_row.round(2)
return round_conversion
def divide_timestamp(time_row):
"""Seperates timestamp into individual
months, days, weekdays, hours, and minutes"""
month_list = []
day_list = []
weekday_list = []
hour_list = []
minutes_list = []
time_str = time_row.astype(str).values.tolist()
for i in time_str:
#for months
month = parse(i).month
month_list.append(month)
#for days
day = parse(i).day
day_list.append(day)
#for weekdays
weekday = parse(i).weekday()
weekday_list.append(weekday)
#for hours
hour = parse(i).hour
hour_list.append(hour)
#for minutes
minute = parse(i).minute
minutes_list.append(minute)
return month_list, day_list, weekday_list, hour_list, minutes_list
def create_dataframe():
"""Creates dataframe for glucose analysis"""
#---get correct path to csv input file-----------
path_to_input_csv = convertToCsv()
current_file = os.path.basename(path_to_input_csv)
print(f"Currently Reading File: {current_file}")
care_link_file = input("\nEnter Medtronic File: ")
#------------------------------------------------
#----------Create data frame-------------------
#get all data from csv
gluc_level_data = pd.read_csv(path_to_input_csv)
# remove rows that are NaN for value
gluc_level_data = gluc_level_data[pd.notnull(gluc_level_data["value"])]
#----------------------------------------------
#---------------conversion mmol/L to mg/dL-----------------
glu = convert_glucose(gluc_level_data)
#----------------------------------------------------------
#--------Save month, day, weekday, hour, minutes---------------
timestamp = gluc_level_data.loc[:, 'time']
saved_index = timestamp.index # save the index from this dataframe as variable index
month_list, day_list, weekday_list, hour_list, minutes_list = divide_timestamp(timestamp)
#convert the lists to dataframes while ensuring the index corresponds to the other dataframes
monthdf = pd.DataFrame(np.array(month_list), index=saved_index)
daydf = pd.DataFrame(np.array(day_list), index=saved_index)
weekdaydf = pd.DataFrame(np.array(weekday_list), index=saved_index)
hourdf = pd.DataFrame(np.array(hour_list), index=saved_index)
minutesdf = pd.DataFrame(np.array(minutes_list), index=saved_index)
#--------------------------------------------------------------
#---------BOLUS OUTPUT---------------------------
path_to_care_link = os.path.join(os.getcwd(), "csvData", "csvInData")
bolus_carb_csv = pd.read_csv(os.path.join(path_to_care_link, care_link_file), skiprows=6)
bolus = bolus_carb_csv.loc[:, 'Bolus Volume Delivered (U)']
date = bolus_carb_csv.loc[:, 'Date']
time = bolus_carb_csv.loc[:, 'Time']
carb = bolus_carb_csv.loc[:, 'BWZ Carb Input (grams)']
bolus_carb_data =
|
pd.concat([date, time, bolus, carb], axis=1, ignore_index=True)
|
pandas.concat
|
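The completion for this row is a column-wise `pd.concat`. A minimal, runnable sketch of that pattern, using made-up stand-ins for the CareLink columns rather than the actual CSV from the prompt:

```python
import pandas as pd

# Made-up stand-ins for the Date, Time, Bolus and Carb columns read from the CareLink CSV.
date = pd.Series(["2021-01-01", "2021-01-01"], name="Date")
time = pd.Series(["08:00", "12:30"], name="Time")
bolus = pd.Series([1.2, 0.8], name="Bolus Volume Delivered (U)")
carb = pd.Series([45, 30], name="BWZ Carb Input (grams)")

# axis=1 places the Series side by side as columns; ignore_index=True discards
# the original names and relabels the columns 0, 1, 2, 3.
bolus_carb_data = pd.concat([date, time, bolus, carb], axis=1, ignore_index=True)
print(bolus_carb_data)
```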
from contextlib import contextmanager
from time import time, sleep
from .datasets import timeseries
from .ops import spatial_mean, temporal_mean, climatology, anomaly
from distributed import wait
from distributed.utils import format_bytes
import datetime
from distributed import Client
import pandas as pd
import logging
import os
logger = logging.getLogger()
logger.setLevel(level=logging.WARNING)
here = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
results_dir = os.path.join(here, "results")
class DiagnosticTimer:
def __init__(self):
self.diagnostics = []
@contextmanager
def time(self, **kwargs):
tic = time()
yield
toc = time()
kwargs["runtime"] = toc - tic
self.diagnostics.append(kwargs)
def dataframe(self):
return pd.DataFrame(self.diagnostics)
def cluster_wait(client, n_workers):
""" Delay process until all workers in the cluster are available.
"""
start = time()
wait_thresh = 600
worker_thresh = n_workers * 0.95
while len(client.cluster.scheduler.workers) < n_workers:
sleep(2)
elapsed = time() - start
# If we are getting close to timeout but cluster is mostly available,
# just break out
if elapsed > wait_thresh and len(client.cluster.scheduler.workers) >= worker_thresh:
break
class Runner:
def __init__(self, input_file):
import yaml
try:
with open(input_file) as f:
self.params = yaml.safe_load(f)
except Exception as exc:
raise exc
self.computations = [spatial_mean, temporal_mean, climatology, anomaly]
self.client = None
def create_cluster(self, job_scheduler, maxcore, walltime, memory, queue, wpn):
""" Creates a dask cluster using dask_jobqueue
"""
logger.warning("Creating a dask cluster using dask_jobqueue")
logger.warning(f"Job Scheduler: {job_scheduler}")
logger.warning(f"Memory size for each node: {memory}")
logger.warning(f"Number of cores for each node: {maxcore}")
logger.warning(f"Number of workers for each node: {wpn}")
from dask_jobqueue import PBSCluster, SLURMCluster
job_schedulers = {"pbs": PBSCluster, "slurm": SLURMCluster}
# Note about OMP_NUM_THREADS=1 and --nthreads 1:
# These two settings ensure that each benchmark worker
# uses only one thread during the benchmark.
# The job script shows --nthreads twice,
# but the second --nthreads 1 overrides the first.
cluster = job_schedulers[job_scheduler](
cores=maxcore,
memory=memory,
processes=wpn,
local_directory="$TMPDIR",
interface="ib0",
queue=queue,
walltime=walltime,
env_extra=["OMP_NUM_THREADS=1"],
extra=["--nthreads 1"],
)
self.client = Client(cluster)
logger.warning(
"************************************\n"
"Job script created by dask_jobqueue:\n"
f"{cluster.job_script()}\n"
"***************************************"
)
logger.warning(f"Dask cluster dashboard_link: {self.client.cluster.dashboard_link}")
def run(self):
logger.warning("Reading configuration YAML config file")
machine = self.params["machine"]
job_scheduler = self.params["job_scheduler"]
queue = self.params["queue"]
walltime = self.params["walltime"]
maxmemory_per_node = self.params["maxmemory_per_node"]
maxcore_per_node = self.params["maxcore_per_node"]
chunk_per_worker = self.params["chunk_per_worker"]
freq = self.params["freq"]
spil = self.params["spil"]
output_dir = self.params.get("output_dir", results_dir)
now = datetime.datetime.now()
output_dir = os.path.join(output_dir, f"{machine}/{str(now.date())}")
os.makedirs(output_dir, exist_ok=True)
parameters = self.params["parameters"]
num_workers = parameters["number_of_workers_per_nodes"]
num_threads = parameters.get("number_of_threads_per_workers", 1)
num_nodes = parameters["number_of_nodes"]
chunking_schemes = parameters["chunking_scheme"]
chsz = parameters["chunk_size"]
for wpn in num_workers:
self.create_cluster(
job_scheduler=job_scheduler,
maxcore=maxcore_per_node,
walltime=walltime,
memory=maxmemory_per_node,
queue=queue,
wpn=wpn,
)
for num in num_nodes:
self.client.cluster.scale(num * wpn)
cluster_wait(self.client, num * wpn)
timer = DiagnosticTimer()
dfs = []
logger.warning(
"#####################################################################\n"
f"Dask cluster:\n"
f"\t{self.client.cluster}\n"
)
for chunk_size in chsz:
for chunking_scheme in chunking_schemes:
logger.warning(
f"Benchmark starting with: \n\tworker_per_node = {wpn},"
f"\n\tnum_nodes = {num}, \n\tchunk_size = {chunk_size},"
f"\n\tchunking_scheme = {chunking_scheme},"
f"\n\tchunk per worker = {chunk_per_worker}"
)
ds = timeseries(
chunk_per_worker=chunk_per_worker,
chunk_size=chunk_size,
chunking_scheme=chunking_scheme,
num_nodes=num,
freq=freq,
worker_per_node=wpn,
).persist()
wait(ds)
dataset_size = format_bytes(ds.nbytes)
logger.warning(ds)
logger.warning(f"Dataset total size: {dataset_size}")
for op in self.computations:
with timer.time(
operation=op.__name__,
chunk_size=chunk_size,
chunk_per_worker=chunk_per_worker,
dataset_size=dataset_size,
worker_per_node=wpn,
threads_per_worker=num_threads,
num_nodes=num,
chunking_scheme=chunking_scheme,
machine=machine,
maxmemory_per_node=maxmemory_per_node,
maxcore_per_node=maxcore_per_node,
spil=spil,
):
wait(op(ds).persist())
# kills ds, and every other dependent computation
self.client.cancel(ds)
temp_df = timer.dataframe()
dfs.append(temp_df)
now = datetime.datetime.now()
filename = f"{output_dir}/compute_study_{now.strftime('%Y-%m-%d_%H-%M-%S')}.csv"
df =
|
pd.concat(dfs)
|
pandas.concat
|
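This row's completion stacks the per-run diagnostic DataFrames row-wise before the CSV is written. A small self-contained sketch of that use of `pd.concat`, with toy timing records:

```python
import pandas as pd

# Each benchmark configuration yields one small DataFrame of timing records.
dfs = [
    pd.DataFrame({"operation": ["spatial_mean"], "runtime": [1.2]}),
    pd.DataFrame({"operation": ["temporal_mean"], "runtime": [0.9]}),
]

# With the default axis=0 the frames are stacked row-wise; the original call
# keeps each frame's own index, while ignore_index=True would renumber 0..n-1.
df = pd.concat(dfs)
df.to_csv("compute_study_example.csv", index=False)
```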
"""
Filtering transMap. This process has 6 steps:
1) Filter out all projections whose genomic span is more than 5 times the original transcript. This is a hard-coded
filter to deal with the possibility of rearrangements leading to massive transMap projections. It is also required
so that the minSpan filter in pslCDnaFilter works properly -- minSpan is an effective filter against retroposed
pseudogenes.
2) Run pslCDnaFilter using the globalNearBest algorithm to identify the best set of alignments. Turning this value
to a smaller number increases the number of alignments filtered out, which decreases the paralogous alignment call rate.
3) Separate coding and non-coding genes and run both through clusterGenes with or without the -cds flag.
4) For each gene ID in #2, see if it hits more than one cluster. Pick the highest scoring cluster. This resolves
paralogy to ostensible 1-1 orthologs. This populates the GeneAlternateLoci tag.
5) For each cluster ID in #2 that remains after #3, see if it hits more than one gene. If so, then we have a putative
gene family collapse. Pick the highest average scoring gene and discard the other genes, populating the CollapsedGeneIds
and CollapsedGeneNames tags.
6) Perform a rescue step where transMaps that were filtered out by paralog resolution but overlap a valid cluster
are re-added to the set despite not being globalNearBest.
After these steps, the transcripts are evaluated for split genes. This process takes the max span filtered set and
looks at each transcript separately, seeing if there exists projections on either the same contig or different contigs
that are disjoint in original transcript coordinates. This implies that there was a split or a rearrangement.
"""
import os
import json
import collections
import pandas as pd
import tools.nameConversions
import tools.transcripts
import tools.psl
import tools.mathOps
import tools.procOps
import tools.fileOps
import tools.intervals
import tools.sqlInterface
pd.options.mode.chained_assignment = None
def filter_transmap(tm_psl, ref_psl, tm_gp, db_path, psl_tgt, global_near_best, json_tgt):
"""
Entry point for transMap filtering.
:param tm_psl: input PSL
:param ref_psl: reference fake PSL
:param tm_gp: genePred from tm_psl
:param db_path: Path to reference database, to get gene name to transcript name mapping
:param psl_tgt: luigi.LocalTarget() object for PSL output
:param global_near_best: globalNearBest value to pass to PslCDnaFilter
:param json_tgt: luigi.localTarget() object for JSON output
:return:
"""
# load all of the input alignments
unfiltered = tools.psl.get_alignment_dict(tm_psl)
unfiltered_tx_dict = tools.transcripts.get_gene_pred_dict(tm_gp)
ref_psl_dict = tools.psl.get_alignment_dict(ref_psl)
# pre-filter out suspiciously large spans
size_filtered, num_too_long = ref_span(unfiltered, ref_psl_dict)
tmp_size_filtered = tools.fileOps.get_tmp_file()
with open(tmp_size_filtered, 'w') as outf:
for aln in size_filtered.itervalues():
tools.fileOps.print_row(outf, aln.psl_string())
# get transcript -> gene map
transcript_gene_map = tools.sqlInterface.get_transcript_gene_map(db_path)
# get transcript -> biotype and gene -> biotype maps for metrics
transcript_biotype_map = tools.sqlInterface.get_transcript_biotype_map(db_path)
gene_biotype_map = tools.sqlInterface.get_gene_biotype_map(db_path)
# get annotation information for common names
annotation_df = tools.sqlInterface.load_annotation(db_path)
gene_name_map = dict(zip(annotation_df.GeneId, annotation_df.GeneName))
# Construct a hash of alignment metrics to alignment IDs
# The reason for this is that pslCDnaFilter rearranges them internally, so we lose order information
def hash_aln(aln):
"""Hacky way to hash an alignment"""
return hash(tuple([aln.t_name, aln.t_start, aln.t_end, aln.matches, aln.mismatches, aln.block_count,
tools.nameConversions.strip_alignment_numbers(aln.q_name),
tuple(aln.t_starts), tuple(aln.q_starts), tuple(aln.block_sizes)]))
unfiltered_hash_table = {}
for aln_id, aln in unfiltered.iteritems():
unfiltered_hash_table[hash_aln(aln)] = aln_id
assert len(unfiltered_hash_table) == len(unfiltered)
with tools.fileOps.TemporaryFilePath() as local_tmp:
cmd = [['sed', 's/\-[0-9]\+//', tmp_size_filtered], # strip unique identifiers for comparative filters
['pslCDnaFilter', '-globalNearBest={}'.format(global_near_best),
'-minCover=0.1', '-verbose=0',
'-minSpan=0.2', '/dev/stdin', '/dev/stdout']]
tools.procOps.run_proc(cmd, stdout=local_tmp)
filtered_alns = list(tools.psl.psl_iterator(local_tmp))
# load globalBest IDs by using the hash table to figure out which ones we had
global_best = {unfiltered[unfiltered_hash_table[hash_aln(aln)]] for aln in filtered_alns}
global_best_txs = [unfiltered_tx_dict[aln.q_name] for aln in global_best]
# report counts by biotype
grouped = tools.psl.group_alignments_by_qname(global_best)
metrics = {'Paralogy': collections.defaultdict(lambda: collections.Counter())}
paralogy_df = []
for tx_id, alns in grouped.iteritems():
biotype = transcript_biotype_map[tx_id]
paralogy_df.append([tx_id, ','.join(sorted([x.q_name for x in alns if x.q_name != tx_id]))])
metrics['Paralogy'][biotype][len(alns)] += 1
paralogy_df = pd.DataFrame(paralogy_df, columns=['TranscriptId', 'Paralogy'])
# run pslCDnaFilter again, with no options, to get scores
with tools.fileOps.TemporaryFilePath() as tmp_verbose:
cmd = ['pslCDnaFilter', '-verbose=5', tmp_size_filtered, '/dev/stdout']
tools.procOps.run_proc(cmd, stderr=tmp_verbose, stdout='/dev/null')
scores = parse_verbose(tmp_verbose)
# Now coding and non-coding genes are split up. Coding genes are any genes that have a transcript with an ORF.
# The reason for this is that running clusterGenes on full transcripts can lead to false fusions, because
# overlapping UTR intervals are real.
# Identify all genes that are non-coding. Non-coding is defined as genes that have no ORFs.
global_best_by_gene = tools.transcripts.group_transcripts_by_name2(global_best_txs)
coding_genes = {gene_id for gene_id, tx_list in global_best_by_gene.iteritems()
if any(x.cds_size > 0 for x in tx_list)}
with tools.fileOps.TemporaryFilePath() as coding_tmp, tools.fileOps.TemporaryFilePath() as noncoding_tmp, \
tools.fileOps.TemporaryFilePath() as coding_clusters, tools.fileOps.TemporaryFilePath() as noncoding_clusters:
with open(coding_clusters, 'w') as out_coding, open(noncoding_clusters, 'w') as out_noncoding:
for tx in global_best_txs:
if tx.name2 in coding_genes:
tools.fileOps.print_row(out_coding, tx.get_gene_pred())
else:
tools.fileOps.print_row(out_noncoding, tx.get_gene_pred())
cmd = ['clusterGenes', '-cds', coding_tmp, 'no', coding_clusters]
tools.procOps.run_proc(cmd)
cmd = ['clusterGenes', noncoding_tmp, 'no', noncoding_clusters]
tools.procOps.run_proc(cmd)
coding_clustered = pd.read_csv(coding_tmp, sep='\t')
noncoding_clustered = pd.read_csv(noncoding_tmp, sep='\t')
metrics['Gene Family Collapse'] = collections.defaultdict(lambda: collections.Counter())
coding_merged_df, coding_collapse_filtered = filter_clusters(coding_clustered, transcript_gene_map,
gene_name_map, scores, metrics, gene_biotype_map)
noncoding_merged_df, noncoding_collapse_filtered = filter_clusters(noncoding_clustered, transcript_gene_map,
gene_name_map, scores, metrics, gene_biotype_map)
merged_collapse_filtered = pd.concat([coding_collapse_filtered, noncoding_collapse_filtered])
merged_df = pd.concat([coding_merged_df, noncoding_merged_df])
# Now that these have been processed separately, two things must happen:
# 1) All non-coding isoforms of coding genes must be re-added
# 2) Alignments filtered by globalNearBest for a filtered gene should be rescued if they have sufficient coverage
# first, group the putative rescue transcripts by gene ID. Require that they exist in scores because otherwise
# that means they are weirdly overlapping
high_cov_ids = {x.q_name for x in unfiltered.itervalues() if x.coverage > 0.5 and x.q_name in scores}
high_cov_ids -= set(merged_collapse_filtered.gene) # gene is alignment ID
putative_rescue_txs = {tx for aln_id, tx in unfiltered_tx_dict.iteritems() if aln_id in high_cov_ids}
unfiltered_by_gene = tools.transcripts.group_transcripts_by_name2(putative_rescue_txs)
rescued_txs = []
# for each gene ID that survived filtering, find their interval
for gene_id, group in merged_collapse_filtered.groupby('gene_id'):
assert len(set(group['#cluster'])) == 1
tx_intervals = []
for _, s in group.iterrows():
tx_intervals.append(tools.intervals.ChromosomeInterval(s.chrom, s.txStart, s.txEnd, s.strand))
tx_intervals = tools.intervals.hull_of_intervals(tx_intervals)
assert tx_intervals is not None
gene_interval = tx_intervals[0]
for tx in unfiltered_by_gene[gene_id]:
if tx.interval.overlap(gene_interval):
rescued_txs.append(tx.name)
# The final step is filtering for duplicates. A duplicate here means that multiple transMap projections
# map to the same locus. Pick the highest-scoring one.
combined_txs = rescued_txs + list(merged_collapse_filtered.gene)
combined_tx_df =
|
pd.DataFrame(combined_txs, columns=['AlignmentId'])
|
pandas.DataFrame
|
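Here `pd.DataFrame` turns a plain list of alignment IDs into a one-column frame named `AlignmentId`. A short sketch with made-up IDs:

```python
import pandas as pd

# Made-up alignment IDs standing in for the rescued + cluster-filtered set.
combined_txs = ["aln-1", "aln-2", "aln-2", "aln-3"]

# A flat list becomes a single column; `columns` supplies its name.
combined_tx_df = pd.DataFrame(combined_txs, columns=["AlignmentId"])
print(combined_tx_df.drop_duplicates())
```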
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Leaderboard.ipynb (unless otherwise specified).
__all__ = ['get_leaderboard']
# Internal Cell
import pandas as pd
import numpy as np
import boto3
from pathlib import Path
import datetime
# Internal Cell
import zipfile
import shutil
import torch
import tempfile
from .datasets import get_dataset, get_test_dataset
from .instance_segmentation.model import *
# Internal Cell
def parse_filename(fname):
tmp = fname.split("-")
date = pd.to_datetime(tmp[1] + tmp[2] + tmp[3])
alias = tmp[6]
email = tmp[7]
submitted_iou = tmp[5].split("=")[1]
return {
"file_name": fname,
"date": date,
"alias": alias,
"email": email,
"submitted_iou": submitted_iou,
"calculated_iou": np.nan,
}
# Internal Cell
s3 = boto3.resource("s3")
my_bucket = s3.Bucket("ai-league.cisex.org")
private_leaderboard_path = Path("private_leaderboard.csv")
public_leaderboard_path = Path("leaderboard.csv")
def get_submissions_from_s3(private_leaderboard_path=private_leaderboard_path):
"""Downloads the zip file from s3 if there is no record of it in the csv file"""
if private_leaderboard_path.exists():
private_leaderboard = pd.read_csv(private_leaderboard_path)
else:
private_leaderboard = dict(file_name=[])
# download file into models_for_evaluation directory
s3_objects = [
s3_object
for s3_object in my_bucket.objects.all()
if Path(s3_object.key).match("*submission*.zip")
and Path(s3_object.key).name not in list(private_leaderboard["file_name"])
]
if len(s3_objects) > 0:
for i, s3_object in enumerate(s3_objects):
print(f"Downloading {i+1}/{len(s3_objects)} from S3...")
my_bucket.download_file(s3_object.key, f"models_for_evaluation/{Path(s3_object.key).name}")
# return new entries
new_entries = pd.Series([Path(s3_object.key).name for s3_object in s3_objects]).apply(parse_filename).apply(pd.Series)
else:
x = "uploaded-2020-12-22T15:35:15.513570-submission-iou=0.46613-dolphin123-<EMAIL>-2020-12-22T15:35:04.875962.zip"
new_entries = pd.Series([x]).apply(parse_filename).apply(pd.Series).iloc[:0, :]
return new_entries
# Internal Cell
def public(private_leaderboard):
return private_leaderboard[["alias", "date", "submitted_iou", "calculated_iou"]]
# Internal Cell
def merge_with_private_leaderboard(
new_entries, private_leaderboard_path=private_leaderboard_path
):
# merge private leaderboard and new_entries if needed
new_entries["calculated_iou"] = np.nan
if private_leaderboard_path.exists():
private_leaderboard = pd.read_csv(private_leaderboard_path)
private_leaderboard = pd.concat([private_leaderboard, new_entries], axis=0)
private_leaderboard = private_leaderboard.drop_duplicates(subset="file_name")
else:
private_leaderboard = new_entries
private_leaderboard.to_csv(private_leaderboard_path, index=False)
return private_leaderboard
# Internal Cell
def evaluate_model(model_path, mode: str = "test") -> float:
# do it
with tempfile.TemporaryDirectory() as d:
with zipfile.ZipFile(model_path, "r") as zip_ref:
zip_ref.extractall(path=d)
unzipped_path = [x for x in Path(d).glob("submiss*")][0]
model = torch.load(unzipped_path / "model.pt")
if mode.lower() == "val":
_, data_loader = get_dataset("segmentation", batch_size=4)
elif mode.lower() == "test":
data_loader = get_test_dataset("segmentation", batch_size=4)
else:
raise ValueError()
iou, iou_df = iou_metric(model, data_loader.dataset)
return iou
# Internal Cell
cut_off_date = datetime.datetime(2021, 6, 5, 0, 0)
def evaluate_private_leaderboard(private_leaderboard_path=private_leaderboard_path):
private_leaderboard = pd.read_csv(private_leaderboard_path,parse_dates=["date"])
private_leaderboard = private_leaderboard[private_leaderboard["date"] < cut_off_date]
new_entries = private_leaderboard.loc[private_leaderboard["calculated_iou"].isna()]
n = new_entries.shape[0]
for i, ix in enumerate(new_entries.index):
row = new_entries.loc[ix]
file_name, alias, dt = row["file_name"], row["alias"], row["date"]
print(f"Evaluating model {i+1}/{n} for {alias} submitted at {dt}...")
calculated_iou = evaluate_model(f"models_for_evaluation/{file_name}", mode="test")
private_leaderboard.loc[ix, "calculated_iou"] = calculated_iou
private_leaderboard.to_csv(private_leaderboard_path, index=False)
return private_leaderboard
# Internal Cell
def save_public_leaderboard(private_leaderboard_path=private_leaderboard_path, public_leaderboard_path=public_leaderboard_path):
private_leaderboard =
|
pd.read_csv(private_leaderboard_path)
|
pandas.read_csv
|
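The completion reads the private leaderboard CSV back into a DataFrame. A self-contained sketch of `pd.read_csv` on an in-memory stand-in for that file, with the `parse_dates` option the prompt uses elsewhere:

```python
import io
import pandas as pd

# In-memory stand-in for private_leaderboard.csv so the sketch runs on its own.
csv_text = (
    "file_name,date,alias,submitted_iou,calculated_iou\n"
    "submission.zip,2021-06-01,dolphin123,0.46,0.47\n"
)

# read_csv accepts a path, a pathlib.Path, or any file-like object;
# parse_dates converts the listed columns to datetime64 values.
private_leaderboard = pd.read_csv(io.StringIO(csv_text), parse_dates=["date"])
print(private_leaderboard.dtypes)
```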
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#import os
import numpy as np
import pandas as pd
from unittest import TestCase
from exatomic import gaussian
from exatomic.base import resource
from exatomic.gaussian import Output, Fchk
class TestFchk(TestCase):
def setUp(self):
self.mam1 = Fchk(resource('g09-ch3nh2-631g.fchk'))
self.mam2 = Fchk(resource('g09-ch3nh2-augccpvdz.fchk'))
self.mam3 = Fchk(resource('g16-methyloxirane-def2tzvp-freq.fchk'))
self.mam4 = Fchk(resource('g16-h2o2-def2tzvp-freq.fchk'))
self.nitro_nmr = Fchk(resource('g16-nitromalonamide-6-31++g-nmr.fchk'))
def test_parse_atom(self):
self.mam1.parse_atom()
self.assertEqual(self.mam1.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.atom))))
self.mam2.parse_atom()
self.assertEqual(self.mam2.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.atom))))
def test_parse_basis_set(self):
self.mam1.parse_basis_set()
self.assertEqual(self.mam1.basis_set.shape[0], 32)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.basis_set))))
self.mam2.parse_basis_set()
self.assertEqual(self.mam2.basis_set.shape[0], 53)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.basis_set))))
def test_parse_orbital(self):
self.mam1.parse_orbital()
self.assertEqual(self.mam1.orbital.shape[0], 28)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.orbital))))
self.mam2.parse_orbital()
self.assertEqual(self.mam2.orbital.shape[0], 91)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.orbital))))
def test_parse_momatrix(self):
self.mam1.parse_momatrix()
self.assertEqual(self.mam1.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.momatrix))))
self.mam2.parse_momatrix()
self.assertEqual(self.mam2.momatrix.shape[0], 8281)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.momatrix))))
def test_parse_basis_set_order(self):
self.mam1.parse_basis_set_order()
self.assertEqual(self.mam1.basis_set_order.shape[0], 28)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.basis_set_order))))
self.mam2.parse_basis_set_order()
self.assertEqual(self.mam2.basis_set_order.shape[0], 91)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.basis_set_order))))
def test_parse_frame(self):
self.mam1.parse_frame()
self.assertEqual(self.mam1.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.frame))))
self.mam2.parse_frame()
self.assertEqual(self.mam2.frame.shape[0], 1)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.frame))))
def test_parse_frequency(self):
self.mam3.parse_frequency()
self.assertEqual(self.mam3.frequency.shape[0], 240)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.frequency))))
self.mam4.parse_frequency()
self.assertEqual(self.mam4.frequency.shape[0], 24)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.frequency))))
def test_parse_frequency_ext(self):
self.mam3.parse_frequency_ext()
self.assertEqual(self.mam3.frequency_ext.shape[0], 24)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.frequency_ext))))
self.mam4.parse_frequency_ext()
self.assertEqual(self.mam4.frequency_ext.shape[0], 6)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.frequency_ext))))
def test_parse_gradient(self):
self.mam3.parse_gradient()
self.assertEqual(self.mam3.gradient.shape[0], 10)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.gradient))))
self.mam4.parse_gradient()
self.assertEqual(self.mam4.gradient.shape[0], 4)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.gradient))))
def test_shielding_tensor(self):
self.nitro_nmr.parse_nmr_shielding()
self.assertEqual(self.nitro_nmr.nmr_shielding.shape[0], 15)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.nitro_nmr.nmr_shielding))))
def test_to_universe(self):
"""Test the to_universe method."""
mam1 = self.mam1.to_universe(ignore=True)
mam2 = self.mam2.to_universe(ignore=True)
for uni in [mam1, mam2]:
# cannot add frequency and frequency_ext attributes as they require
# very specific inputs
for attr in ['atom', 'basis_set', 'basis_set_order',
'momatrix', 'orbital', 'frame']:
self.assertTrue(hasattr(uni, attr))
class TestOutput(TestCase):
"""
This test ensures that the parsing functionality works on
a smattering of output files that were generated with the
Gaussian software package. Target syntax is for Gaussian
09.
"""
def setUp(self):
# TODO : add some cartesian basis set files
self.uo2 = Output(resource('g09-uo2.out'))
self.mam3 = Output(resource('g09-ch3nh2-631g.out'))
self.mam4 = Output(resource('g09-ch3nh2-augccpvdz.out'))
# need two because of the current limitations in the parse_frequency code
self.meth_opt = Output(resource('g16-methyloxirane-def2tzvp-opt.out'))
self.meth_freq = Output(resource('g16-methyloxirane-def2tzvp-freq.out'))
self.nap_tddft = Output(resource('g16-naproxen-def2tzvp-tddft.out'))
self.h2o2_tddft = Output(resource('g16-h2o2-def2tzvp-tddft.out'))
self.nap_opt = Output(resource('g16-naproxen-def2tzvp-opt.out'))
self.nitro_nmr = Output(resource('g16-nitromalonamide-6-31++g-nmr.out'))
# to test having both a geometry optimization and frequencies calculation
self.meth_opt_freq_hp = Output(resource('g16-methyloxirane-def2tzvp-opt-freq.out'))
def test_parse_atom(self):
self.uo2.parse_atom()
self.assertEqual(self.uo2.atom.shape[0], 3)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2.atom))))
self.mam3.parse_atom()
self.assertEqual(self.mam3.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.atom))))
self.mam4.parse_atom()
self.assertEqual(self.mam4.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.atom))))
self.meth_opt.parse_atom()
self.assertEqual(self.meth_opt.atom.shape[0], 120)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.meth_opt.atom))))
self.nap_opt.parse_atom()
self.assertEqual(self.nap_opt.atom.shape[0], 806)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.nap_opt.atom))))
self.meth_opt_freq_hp.parse_atom()
self.assertEqual(self.meth_opt_freq_hp.atom.shape[0], 130)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.meth_opt_freq_hp.atom))))
def test_parse_basis_set(self):
self.uo2.parse_basis_set()
self.assertEqual(self.uo2.basis_set.shape[0], 49)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2.basis_set))))
self.mam3.parse_basis_set()
self.assertEqual(self.mam3.basis_set.shape[0], 32)
cols = list(set(self.mam3.basis_set._columns))
test = pd.DataFrame(self.mam3.basis_set[cols])
self.assertTrue(np.all(pd.notnull(test)))
self.mam4.parse_basis_set()
self.assertEqual(self.mam4.basis_set.shape[0], 53)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.basis_set))))
def test_parse_orbital(self):
self.uo2.parse_orbital()
self.assertEqual(self.uo2.orbital.shape[0], 141)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2.orbital))))
self.mam3.parse_orbital()
self.assertEqual(self.mam3.orbital.shape[0], 28)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.orbital))))
self.mam4.parse_orbital()
self.assertEqual(self.mam4.orbital.shape[0], 91)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.orbital))))
self.meth_opt.parse_orbital()
self.assertEqual(self.meth_opt.orbital.shape[0], 160)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.meth_opt.orbital))))
self.nap_tddft.parse_orbital()
self.assertEqual(self.nap_tddft.orbital.shape[0], 611)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.nap_tddft.orbital))))
def test_parse_momatrix(self):
self.uo2.parse_momatrix()
self.assertEqual(self.uo2.momatrix.shape[0], 19881)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2.momatrix))))
self.mam3.parse_momatrix()
self.assertEqual(self.mam3.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.momatrix))))
self.mam4.parse_momatrix()
self.assertEqual(self.mam4.momatrix.shape[0], 8281)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam4.momatrix))))
def test_parse_basis_set_order(self):
self.uo2.parse_basis_set_order()
self.assertEqual(self.uo2.basis_set_order.shape[0], 141)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2.basis_set_order))))
self.mam3.parse_basis_set_order()
self.assertEqual(self.mam3.basis_set_order.shape[0], 28)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam3.basis_set_order))))
self.mam4.parse_basis_set_order()
self.assertEqual(self.mam4.basis_set_order.shape[0], 91)
self.assertTrue(np.all(pd.notnull(
|
pd.DataFrame(self.mam4.basis_set_order)
|
pandas.DataFrame
|
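The completion wraps a parsed table in `pd.DataFrame` so the test can assert that every cell is non-null. A tiny sketch of that assertion idiom on a toy table:

```python
import numpy as np
import pandas as pd

# Toy stand-in for a parsed table such as basis_set_order.
table = pd.DataFrame({"center": [0, 0, 1], "L": [0, 1, 0]})

# The test idiom from the prompt: wrap the object in pd.DataFrame (normalising
# any DataFrame subclass) and assert that no cell is null.
assert np.all(pd.notnull(pd.DataFrame(table)))
```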
from datetime import timedelta
import pandas as pd
import numpy as np
from exceptions import MatchCheckpointsException
def find_acute(df, i, segment, near, epsilon):
"""
1. Calculate the distance from the checkpoint to all consecutive points in the data.
2. Acute: for checkpoint "i", check whether the angle between the lines ck:A and A:B is acute. If so, then ck is "between" A and B.
Epsilon is the fudge factor.
"""
point = (segment['location']['lat'], segment['location']['lon'])
df[f'ck_to_A{i}'] = np.linalg.norm(df[['Latitude', 'Longitude']].values - point, axis=1)
df[f'ck_to_B{i}'] = np.linalg.norm(df[['shift_Latitude', 'shift_Longitude']].values - point,
axis=1)
if df[f'ck_to_A{i}'].min() > near * 10:
raise MatchCheckpointsException(
f"It appears you never made it close to checkpoint {segment['segment_name']}")
df['acute'] = df[f'ck_to_A{i}'] ** 2 + df['dist_to_next'] ** 2 <= df[
f'ck_to_B{i}'] ** 2 + epsilon
def match_checkpoints(df, epsilon, near, segments):
"""
Identify the activity point that represents the arrival at the checkpoint:
find nearby points that form acute triangles.
"""
row_slice = 0
for i, seg in enumerate(segments):
try:
find_acute(df, i, seg, near, epsilon)
# assign segment number to the first acute point near the segment point
df.loc[
df[row_slice:][(df[row_slice:][f'ck_to_A{i}'] <= near) &
(df[row_slice:].acute)].index[0], ['checkpoint', 'Segment_name']] = i, seg[
'Segment_name']
# This removes the points we have already passed.
row_slice = int(df[df.checkpoint == i].index[0])
# df['seg_duration'] = df[df.checkpoint >= 0]['Date_Time'].diff()
except Exception as e:
raise MatchCheckpointsException(
f"Fail on checkpoint:{i} location: {(seg['location']['lat'], seg['location']['lon'])}\nDataframe columns:\n{df.columns}")
def calculate_segment_times(df, segments):
"""
This is for fixed-distance segments, competing for time.
It selects only the rows matched to checkpoints, then calculates the time differences.
"""
df['seg_duration'] = df[df.checkpoint >= 0]['Date_Time'].diff()
df['segment'] = df.checkpoint.fillna(method='ffill')
# Set everything at the end to nan
df['segment'][df.segment >= len(segments) - 1] = np.nan
# TODO Add segment metrics
def calculate_segment_distance(df, segments):
"""
This is for fixed-time (TicToc) segments, competing for distance.
[{
'segment_name': 'Event Start',
'location': {'lat': 39.737912, 'lon': -105.523881},
'type_name': 'transport',
'type_args': {'time_limit': 1800}
'duration': Timedelta('0 days 00:24:21'),
'datetime': Timestamp('2012-07-21 09:18:13'),
'distance': 25677
'total_timed': datetime.timedelta(0),
total_timed_types: {'uphill':Timedelta(123), 'gravel': Timedelta(321)}
},]
"""
results = []
for i, seg in enumerate(segments):
if seg['type_name'] == 'tictoc':
seg_start_time = df[df.checkpoint == i].Date_Time.values[0]
seg_end_time = seg_start_time + pd.Timedelta(minutes=seg['type_args']['tictoc'])
seg_past_end = df[df.Date_Time >= seg_end_time].iloc[0]
seg_before_end = df[df.Date_Time <= seg_end_time].iloc[-1]
a = seg_before_end.distance
b = seg_past_end.distance
c = seg_before_end.Date_Time
d = seg_past_end.Date_Time
p = seg_end_time
seg_finish = (b - a) * ((p - d) / (d - c)) + a
seg_distance = seg_finish - df[df.checkpoint == i].distance.iloc[0]
seg['distance'] = seg_distance
seg['duration'] = timedelta(minutes=seg['type_args']['tictoc'])
seg['datetime'] = pd.to_datetime(seg_start_time)
results.append(seg)
else:
results.append(seg)
return results
def select_near_points(self, check_point, df):
"""
TODO: Work in progress
Selects points near the checkpoints:
These may be anywhere in the activity, but that seems ok.
"""
df['Date_Time'] = df.Date_Time.astype(np.int64)
columns = ['Date_Time', 'Latitude', 'Longitude', 'Altitude']
start = 10
end = 11
rows = 7 # actually get rows - 2
realend = (end - start) * 5 + start
for i in range(start, realend, rows):
curr_row = df[columns].iloc[i]
next_row = df[columns].iloc[i + 1]
new_df = pd.DataFrame(np.linspace(curr_row, next_row, rows), columns=columns)
df =
|
pd.concat([df[:i], new_df, df[i + rows:]], ignore_index=True)
|
pandas.concat
|
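This completion splices interpolated rows into the middle of a DataFrame with `pd.concat`. A simplified sketch (it inserts rather than replaces a slice, and uses made-up GPS fixes):

```python
import pandas as pd

df = pd.DataFrame({"Latitude": [39.10, 39.11, 39.14],
                   "Longitude": [-105.50, -105.51, -105.54]})

# Two interpolated fixes to splice in between positional rows 1 and 2.
new_df = pd.DataFrame({"Latitude": [39.12, 39.13],
                       "Longitude": [-105.52, -105.53]})

i = 2  # splice point: keep df[:i], insert new_df, then keep df[i:]
df = pd.concat([df[:i], new_df, df[i:]], ignore_index=True)
print(df)
```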
# -*- coding: utf-8 -*-
"""
Created on Mon May 11 02:31:35 2020
@author: Andrew
"""
#%%
from medical_ML import calc_auc_conf_interval
import pandas as pd
results_path = '../Results/allvars_pce_nhwblack_0913/pce_asian'
ress = pd.read_csv(results_path + '/all_results_test.csv')
ress.columns = ['model', 'AUC']
pred_probs = pd.read_csv(results_path + '/test_predicted_probs_' + ress.model[0] + '.csv')
new_res = []
for i in range(ress.shape[0]):
sdc, lc, _, hc = calc_auc_conf_interval(ress.AUC.iloc[i], (pred_probs.y == 0).sum(), (pred_probs.y ==1).sum())
new_res.append([ress.AUC.iloc[i], sdc, lc, hc, '{:.3f} ({:.3f}-{:.3f})'.format(ress.AUC.iloc[i], lc, hc)])
resdf =
|
pd.DataFrame(new_res)
|
pandas.DataFrame
|
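Here `pd.DataFrame` builds a results table from a list of per-model rows. A sketch with invented numbers; the column names are added only for readability, since the original call leaves the default 0..4 labels:

```python
import pandas as pd

# Made-up per-model rows: [AUC, sd, CI low, CI high, formatted string].
new_res = [
    [0.81, 0.02, 0.77, 0.85, "0.810 (0.770-0.850)"],
    [0.79, 0.03, 0.73, 0.85, "0.790 (0.730-0.850)"],
]

# Without `columns`, pandas would label these 0..4, as in the original call.
resdf = pd.DataFrame(new_res, columns=["AUC", "sd", "ci_low", "ci_high", "AUC (95% CI)"])
print(resdf)
```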
#!/usr/bin/env python
# coding: utf-8
# In[7]:
#initialize Elo ranking: set E_i(0) = 1500, i.e. set the initial Elo ranking to 1500 for all players
#get players list
import pandas as pd
players_list = pd.read_csv('players_data_1.csv')
players_list = players_list['player_id']
players_list = players_list.to_list()
players_list
# In[8]:
#load matches data
matches_df = pd.read_csv('final_df.csv')
# In[41]:
#initialize elo rankings
matches_df['elo_1'] = 0
matches_df['elo_2'] = 0
index_1 = []
index_2 = []
for item in players_list:
temp = matches_df.loc[ (matches_df['player_id'] == item) | (matches_df['opponent_id'] == item)]
temp = temp.sort_values(by='start_date')
index = temp.index[0]
if temp.iloc[0]['player_id'] == str(item):
index_1.append(index)
if temp.iloc[0]['opponent_id'] == str(item):
index_2.append(index)
temp = None
index = None
# In[63]:
len(index_1) + len(index_2) == len(players_list)
# In[57]:
#Set initial elo's to 1500
for i in index_1:
matches_df.at[i,'elo_1'] = 1500
for i in index_2:
matches_df.at[i,'elo_2'] = 1500
# In[64]:
matches_df.loc[(matches_df['elo_1'] == 1500) | (matches_df['elo_2']==1500)]
# In[1]:
#checkpoint: save df to csv
matches_df.to_csv('final_df.csv')
# In[1]:
#start from checkpoint
import pandas as pd
matches_df =
|
pd.read_csv('final_df.csv')
|
pandas.read_csv
|
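The completion reloads the checkpoint CSV with `pd.read_csv`. A small round-trip sketch with toy rows, which also notes the index-column gotcha:

```python
import pandas as pd

matches_df = pd.DataFrame({"player_id": [101, 102], "elo_1": [1500, 0]})

# Checkpoint round trip: to_csv without index=False also writes the row index,
# which reappears as an "Unnamed: 0" column on reload; index=False avoids that.
matches_df.to_csv("final_df_example.csv", index=False)
matches_df = pd.read_csv("final_df_example.csv")
print(matches_df.columns.tolist())
```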
# -*- coding: utf-8 -*-
#write the main and auxiliary list per herd to PDF files
import pandas as pd
import numpy as np
glavniSeznam = pd.read_csv('/home/janao/Documents/F4F/OdbiraZivali/RjaveKrave_928_15022017.csv', sep=" ")
glavniSeznam = pd.DataFrame(glavniSeznam, columns=['CRE_SIFRA_CREDA','ID_ZIVALI','DAT_ROJSTVO','ZIV_ID_SEQ'])
pomSeznam = pd.read_csv('/home/janao/Documents/F4F/OdbiraZivali/seznamB_15022017.csv', sep=" ")
pomSeznam =
|
pd.DataFrame(pomSeznam, columns=['CRE_SIFRA_CREDA','ID_ZIVALI','DAT_ROJSTVO','ZIV_ID_SEQ','rel'])
|
pandas.DataFrame
|
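This completion re-wraps an existing DataFrame with an explicit column list. A sketch with made-up herd rows showing that this selects and orders columns:

```python
import pandas as pd

# Made-up rows mimicking the herd list; column names follow the prompt.
pomSeznam = pd.DataFrame({
    "CRE_SIFRA_CREDA": [101, 102],
    "ID_ZIVALI": ["SI123", "SI456"],
    "DAT_ROJSTVO": ["2015-03-01", "2016-07-12"],
    "ZIV_ID_SEQ": [1, 2],
    "rel": [0.91, 0.87],
    "extra": ["x", "y"],
})

# Re-wrapping an existing DataFrame with `columns` keeps only those columns,
# in that order; a requested column that is missing would come back all-NaN.
subset = pd.DataFrame(pomSeznam, columns=["CRE_SIFRA_CREDA", "ID_ZIVALI", "DAT_ROJSTVO", "ZIV_ID_SEQ", "rel"])
print(subset.columns.tolist())
```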
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series, isna, notna
import pandas._testing as tm
import pandas.tseries.offsets as offsets
def _check_moment_func(
static_comp,
name,
raw,
has_min_periods=True,
has_center=True,
has_time_rule=True,
fill_value=None,
zero_min_periods_equal=True,
series=None,
frame=None,
**kwargs,
):
def get_result(obj, window, min_periods=None, center=False):
r = obj.rolling(window=window, min_periods=min_periods, center=center)
return getattr(r, name)(**kwargs)
series_result = get_result(series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1], static_comp(series[-50:]))
frame_result = get_result(frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False,
)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
ser = series[::2].resample("B").mean()
frm = frame[::2].resample("B").mean()
if has_min_periods:
series_result = get_result(ser, window=win, min_periods=minp)
frame_result = get_result(frm, window=win, min_periods=minp)
else:
series_result = get_result(ser, window=win, min_periods=0)
frame_result = get_result(frm, window=win, min_periods=0)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], static_comp(trunc_series))
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False,
)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(series) - 1, len(series)):
result = get_result(series, len(series) + 1, min_periods=minp)
expected = get_result(series, len(series), min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = get_result(series, len(series) + 1, min_periods=0)
expected = get_result(series, len(series), min_periods=0)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask,
|
isna(expected)
|
pandas.isna
|
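The completion calls `pandas.isna` to build a NaN mask for the rolling-window test. A compact sketch of the mask-then-compare idiom with toy Series:

```python
import numpy as np
import pandas as pd
from pandas import isna

result = pd.Series([1.0, np.nan, 3.0])
expected = pd.Series([1.0, np.nan, 3.0])

# isna returns a boolean mask of missing values: the test first checks that
# both results are missing in the same places, then compares the rest.
nan_mask = isna(result)
pd.testing.assert_series_equal(nan_mask, isna(expected))
valid = ~nan_mask
np.testing.assert_allclose(result[valid], expected[valid])
```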
# -*- coding: utf-8 -*-
"""
AIDeveloper
---------
@author: maikherbig
"""
import os,sys,gc
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'#suppress warnings/info from tensorflow
if not sys.platform.startswith("win"):
from multiprocessing import freeze_support
freeze_support()
# Make sure to get the right icon file on win,linux and mac
if sys.platform=="darwin":
icon_suff = ".icns"
else:
icon_suff = ".ico"
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtWidgets, QtGui
from pyqtgraph import Qt
import aid_start
dir_root = os.path.dirname(aid_start.__file__)#ask the module for its origin
dir_settings = os.path.join(dir_root,"aid_settings.json")#dir to settings
Default_dict = aid_start.get_default_dict(dir_settings)
#try:
# splashapp = QtWidgets.QApplication(sys.argv)
# #splashapp.setWindowIcon(QtGui.QIcon("."+os.sep+"art"+os.sep+Default_dict["Icon theme"]+os.sep+"main_icon_simple_04_256.ico"))
# # Create and display the splash screen
# splash_pix = os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff)
# splash_pix = QtGui.QPixmap(splash_pix)
# #splash_pix = QtGui.QPixmap("."+os.sep+"art"+os.sep+Default_dict["Icon theme"]+os.sep+"main_icon_simple_04_256"+icon_suff)
# splash = QtWidgets.QSplashScreen(splash_pix, QtCore.Qt.WindowStaysOnTopHint)
# splash.setMask(splash_pix.mask())
# splash.show()
#except:
# pass
#BEFORE importing tensorflow or anything from keras: make sure the keras.json has
#certain properties
keras_json_path = os.path.expanduser('~')+os.sep+'.keras'+os.sep+'keras.json'
if not os.path.isdir(os.path.expanduser('~')+os.sep+'.keras'):
os.mkdir(os.path.expanduser('~')+os.sep+'.keras')
aid_start.banner() #show a fancy banner in console
aid_start.keras_json_check(keras_json_path)
import traceback,shutil,re,ast,io,platform
import h5py,json,time,copy,urllib,datetime
from stat import S_IREAD,S_IRGRP,S_IROTH,S_IWRITE,S_IWGRP,S_IWOTH
import tensorflow as tf
from tensorboard import program
from tensorboard import default
from tensorflow.python.client import device_lib
devices = device_lib.list_local_devices()
device_types = [devices[i].device_type for i in range(len(devices))]
#Get the number of CPU cores and GPUs
cpu_nr = os.cpu_count()
gpu_nr = device_types.count("GPU")
print("Nr. of GPUs detected: "+str(gpu_nr))
print("Found "+str(len(devices))+" device(s):")
print("------------------------")
for i in range(len(devices)):
print("Device "+str(i)+": "+devices[i].name)
print("Device type: "+devices[i].device_type)
print("Device description: "+devices[i].physical_device_desc)
print("------------------------")
#Split CPU and GPU into two lists of devices
devices_cpu = []
devices_gpu = []
for dev in devices:
if dev.device_type=="CPU":
devices_cpu.append(dev)
elif dev.device_type=="GPU":
devices_gpu.append(dev)
else:
print("Unknown device type:"+str(dev)+"\n")
import numpy as np
rand_state = np.random.RandomState(117) #to get the same random number on diff. PCs
from scipy import ndimage,misc
from sklearn import metrics,preprocessing
import PIL
import dclab
import cv2
import pandas as pd
import openpyxl,xlrd
import psutil
from keras.models import model_from_json,model_from_config,load_model,clone_model
from keras import backend as K
if 'GPU' in device_types:
keras_gpu_avail = K.tensorflow_backend._get_available_gpus()
if len(keras_gpu_avail)>0:
print("Following GPU is used:")
print(keras_gpu_avail)
print("------------------------")
else:
print("TensorFlow detected GPU, but Keras didn't")
print("------------------------")
from keras.preprocessing.image import load_img
from keras.utils import np_utils,multi_gpu_model
from keras.utils.conv_utils import convert_kernel
import keras_metrics #side package for precision, recall etc during training
global keras_metrics
import model_zoo
from keras2onnx import convert_keras
from onnx import save_model as save_onnx
import aid_img, aid_dl, aid_bin
import aid_frontend
from partial_trainability import partial_trainability
import aid_imports
VERSION = "0.2.3" #Python 3.5.6 Version
model_zoo_version = model_zoo.__version__()
print("AIDeveloper Version: "+VERSION)
print("model_zoo.py Version: "+model_zoo.__version__())
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig)
tooltips = aid_start.get_tooltips()
class MyPopup(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
class WorkerSignals(QtCore.QObject):
'''
Code inspired from here: https://www.learnpyqt.com/courses/concurrent-execution/multithreading-pyqt-applications-qthreadpool/
Defines the signals available from a running worker thread.
Supported signals are:
finished
No data
error
`tuple` (exctype, value, traceback.format_exc() )
result
`object` data returned from processing, anything
progress
`int` indicating % progress
history
`dict` containing keras model history.history resulting from .fit
'''
finished = QtCore.pyqtSignal()
error = QtCore.pyqtSignal(tuple)
result = QtCore.pyqtSignal(object)
progress = QtCore.pyqtSignal(int)
history = QtCore.pyqtSignal(dict)
class Worker(QtCore.QRunnable):
'''
Code inspired/copied from: https://www.learnpyqt.com/courses/concurrent-execution/multithreading-pyqt-applications-qthreadpool/
Worker thread
Inherits from QRunnable to handler worker thread setup, signals and wrap-up.
:param callback: The function callback to run on this worker thread. Supplied args and
kwargs will be passed through to the runner.
:type callback: function
:param args: Arguments to pass to the callback function
:param kwargs: Keywords to pass to the callback function
'''
def __init__(self, fn, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
# Add the callback to our kwargs
self.kwargs['progress_callback'] = self.signals.progress
self.kwargs['history_callback'] = self.signals.history
@QtCore.pyqtSlot()
def run(self):
'''
Initialise the runner function with passed args, kwargs.
'''
# Retrieve args/kwargs here; and fire processing using them
try:
result = self.fn(*self.args, **self.kwargs)
except:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc()))
else:
self.signals.result.emit(result) # Return the result of the processing
finally:
self.signals.finished.emit() # Done
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setupUi()
def setupUi(self):
aid_frontend.setup_main_ui(self,gpu_nr)
def retranslateUi(self):
aid_frontend.retranslate_main_ui(self,gpu_nr,VERSION)
def dataDropped(self, l):
#If there is data stored on ram tell user that RAM needs to be refreshed!
if len(self.ram)>0:
self.statusbar.showMessage("Newly added data is not yet in RAM. Only RAM data will be used. Use ->'File'->'Data to RAM now' to update RAM",5000)
#l is a list of some filenames (.rtdc) or folders (containing .jpg, jpeg, .png)
#Iterate over l and check if it is a folder or a file (directory)
isfile = [os.path.isfile(str(url)) for url in l]
isfolder = [os.path.isdir(str(url)) for url in l]
#####################For folders with images:##########################
#where are folders?
ind_true = np.where(np.array(isfolder)==True)[0]
foldernames = list(np.array(l)[ind_true]) #select the indices that are valid
#On mac, there is a trailing / in case of folders; remove them
foldernames = [os.path.normpath(url) for url in foldernames]
basename = [os.path.basename(f) for f in foldernames]
#Look quickly inside the folders and ask the user if he wants to convert
#to .rtdc (might take a while!)
if len(foldernames)>0: #User dropped (also) folders (which may contain images)
# filecounts = []
# for i in range(len(foldernames)):
# url = foldernames[i]
# files = os.listdir(url)
# files_full = [os.path.join(url,files[i]) for i in range(len(files))]
# filecounts.append(len([f for f in files_full if os.path.isfile(f)]))
# Text = []
# for b,n in zip(basename,filecounts):
# Text.append(b+": "+str(n)+" images")
# Text = "\n".join(Text)
Text = "Images from single folders are read and saved to individual \
.rtdc files with the same name like the corresponding folder.<b>If \
you have RGB images you can either save the full RGB information, \
or do a conversion to Grayscale (saves some diskspace but information \
about color is lost). RGB is recommended since AID will automatically\
do the conversion to grayscale later if required.<b>If you have \
Grayscale images, a conversion to RGB will just copy the info to all \
channels, which allows you to use RGB-mode and Grayscale-mode later on."
Text = Text+"\nImages from following folders will be converted:\n"+"\n".join(basename)
#Show the user a summary with all the found folders and how many files are
#contained. Ask if he wants to convert
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>Should the images of the chosen folder(s)\
be converted to .rtdc using <b>RGB</b> or <b>Grayscale</b> format? <b>\
(RGB is recommended!)</b> Either option might take some time. You can \
reuse the .rtdc file next time.</p></body></html>"
msg.setText(text)
msg.setDetailedText(Text)
msg.setWindowTitle("Format for conversion to .rtdc (RGB/Grayscale)")
msg.addButton(QtGui.QPushButton('Convert to Grayscale'), QtGui.QMessageBox.YesRole)
msg.addButton(QtGui.QPushButton('Convert to RGB'), QtGui.QMessageBox.NoRole)
msg.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
retval = msg.exec_()
#Conversion of images in folders is (almost) independent from what
#is going to be fitted (So I leave the option menu still!)
#In options: Color Mode one can still use RGB mode and export here as
#Grayscale (but this would actually not work since RGB information is lost).
#The other way around works. Therefore it is recommended to export RGB!
if retval==0:
color_mode = "Grayscale"
channels = 1
elif retval==1:
color_mode = "RGB"
channels = 3
else:
return
self.statusbar.showMessage("Color mode' "+color_mode+"' is used",5000)
url_converted = []
for i in range(len(foldernames)):
url = foldernames[i]
print("Start converting images in\n"+url)
#try:
#get a list of files inside this directory:
images,pos_x,pos_y = [],[],[]
for root, dirs, files in os.walk(url):
for file in files:
try:
path = os.path.join(root, file)
img = load_img(path,color_mode=color_mode.lower()) #This uses PIL and supports many many formats!
images.append(np.array(img)) #append nice numpy array to list
#create pos_x and pos_y
pos_x.append( int(np.round(img.width/2.0,0)) )
pos_y.append( int(np.round(img.height/2.0,0)) )
except:
pass
#Thanks to andko76 for pointing out that unequal image sizes cause an error:
#https://github.com/maikherbig/AIDeveloper/issues/1
#Check that all images have the same size
# img_shape_errors = 0
# text_error = "Images have unequal dimensions:"
# img_h = [a.shape[0] for a in images]
# img_h_uni = len(np.unique(img_h))
# if img_h_uni!=1:
# text_error += "\n- found unequal heights"
# img_shape_errors=1
# img_w = [a.shape[1] for a in images]
# img_w_uni = len(np.unique(img_w))
# if img_w_uni!=1:
# text_error += "\n- found unequal widths"
# img_shape_errors=1
# img_c = [len(a.shape) for a in images]
# img_c_uni = len(np.unique(img_c))
# if img_c_uni!=1:
# text_error += "\n- found unequal numbers of channels"
# img_shape_errors=1
# #If there were issues detected, show error message
# if img_shape_errors==1:
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Warning)
# msg.setText(str(text_error))
# msg.setWindowTitle("Error: Unequal image shapes")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
# return
#Get a list of occurring image dimensions (width and height)
img_shape = [a.shape[0] for a in images] + [a.shape[1] for a in images]
dims = np.unique(img_shape)
#Get a list of occurences of image shapes
img_shape = [str(a.shape[0])+" x "+str(a.shape[1]) for a in images]
occurences = np.unique(img_shape,return_counts=True)
#inform user if there is more than one img shape
if len(occurences[0])>1 or len(dims)>1:
text_detail = "Path: "+url
text_detail += "\nFollowing image shapes are present"
for i in range(len(occurences[0])):
text_detail+="\n- "+str(occurences[1][i])+" times: "+str(occurences[0][i])
self.popup_imgRes = QtGui.QDialog()
self.popup_imgRes_ui = aid_frontend.popup_imageLoadResize()
self.popup_imgRes_ui.setupUi(self.popup_imgRes) #open a popup to show options for image resizing (make image equally sized)
#self.popup_imgRes.setWindowModality(QtCore.Qt.WindowModal)
self.popup_imgRes.setWindowModality(QtCore.Qt.ApplicationModal)
#Insert information into textBrowser
self.popup_imgRes_ui.textBrowser_imgResize_occurences.setText(text_detail)
Image_import_dimension = Default_dict["Image_import_dimension"]
self.popup_imgRes_ui.spinBox_ingResize_h_1.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_h_2.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_w_1.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_w_2.setValue(Image_import_dimension)
Image_import_interpol_method = Default_dict["Image_import_interpol_method"]
index = self.popup_imgRes_ui.comboBox_resizeMethod.findText(Image_import_interpol_method, QtCore.Qt.MatchFixedString)
if index >= 0:
self.popup_imgRes_ui.comboBox_resizeMethod.setCurrentIndex(index)
#Define function for the OK button:
def popup_imgRes_ok(images,channels,pos_x,pos_y):
print("Start resizing operation")
#Get info from GUI
final_h = int(self.popup_imgRes_ui.spinBox_ingResize_h_1.value())
print("Height:"+str(final_h))
final_w = int(self.popup_imgRes_ui.spinBox_ingResize_w_1.value())
print("Width:"+str(final_w))
Default_dict["Image_import_dimension"] = final_h
pix = 1
if self.popup_imgRes_ui.radioButton_imgResize_cropPad.isChecked():#cropping and padding method
images = aid_img.image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode="cv2.BORDER_CONSTANT")
elif self.popup_imgRes_ui.radioButton_imgResize_interpolate.isChecked():
interpolation_method = str(self.popup_imgRes_ui.comboBox_resizeMethod.currentText())
Default_dict["Image_import_interpol_method"] = interpolation_method
images = aid_img.image_resize_scale(images,pos_x,pos_y,final_h,final_w,channels,interpolation_method,verbose=False)
else:
print("Invalid image resize method!")
#Save the Default_dict
aid_bin.save_aid_settings(Default_dict)
self.popup_imgRes.accept()
return images
#Define function for the Cancel button:
def popup_imgRes_cancel():
self.popup_imgRes.close()
return
self.popup_imgRes_ui.pushButton_imgResize_ok.clicked.connect(lambda: popup_imgRes_ok(images,channels,pos_x,pos_y))
self.popup_imgRes_ui.pushButton_imgResize_cancel.clicked.connect(popup_imgRes_cancel)
retval = self.popup_imgRes.exec_()
#retval is 0 if the user clicked cancel or just closed the window; in this case just exit the function
if retval==0:
return
#get new pos_x, pos_y (after cropping, the pixel value for the middle of the image is different!)
pos_x = [int(np.round(img.shape[1]/2.0,0)) for img in images]
pos_y = [int(np.round(img.shape[0]/2.0,0)) for img in images]
#Now, all images are of identical shape and can be converted to a numpy array
images = np.array((images), dtype="uint8")
pos_x = np.array((pos_x), dtype="uint8")
pos_y = np.array((pos_y), dtype="uint8")
#Save as foldername.rtdc
fname = url+".rtdc"
if os.path.isfile(fname):
#ask user if file can be overwritten
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>File:"+fname+" already exists. Should it be overwritten?</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Overwrite file?")
msg.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
msg.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.NoRole)
msg.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
retval = msg.exec_()
if retval==0:
try:
os.remove(fname)
aid_img.imgs_2_rtdc(fname,images,pos_x,pos_y)
url_converted.append(fname)
except Exception as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
retval = msg.exec_()
elif retval==1:
pass
else:
pass
else:#file does not yet exist. Create it
aid_img.imgs_2_rtdc(fname,images,pos_x,pos_y)
url_converted.append(fname)
print("Finished converting! Final dimension of image tensor is:"+str(images.shape))
#Now load the created files directly to drag/drop-region!
self.dataDropped(url_converted)
#####################For .rtdc files:##################################
#where are files?
ind_true = np.where(np.array(isfile)==True)[0]
filenames = list(np.array(l)[ind_true]) #select the indices that are valid
#check if the file can be opened and get some information
fileinfo = []
for i in range(len(filenames)):
rtdc_path = filenames[i]
try:
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
features = list(rtdc_ds["events"].keys())
#Make sure that "image", "pos_x" and "pos_y" are available
if "image" in features and "pos_x" in features and "pos_y" in features:
nr_images = rtdc_ds["events"]["image"].len()
pix = rtdc_ds.attrs["imaging:pixel size"]
xtra_in_available = len(rtdc_ds.keys())>2 #True only if there are more than 2 elements
fileinfo.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"pix":pix,"xtra_in":xtra_in_available})
else:
missing = []
for feat in ["image","pos_x","pos_y"]:
if feat not in features:
missing.append(feat)
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Essential feature(s) are missing in data-set")
msg.setDetailedText("Data-set: "+rtdc_path+"\nis missing "+str(missing))
msg.setWindowTitle("Missing essential features")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
except Exception as e:
print(e)
#Add the stuff to the combobox on Plot/Peak Tab
url_list = [fileinfo[iterator]["rtdc_path"] for iterator in range(len(fileinfo))]
self.comboBox_chooseRtdcFile.addItems(url_list)
self.comboBox_selectData.addItems(url_list)
if len(url_list)==0: #This fixes the issue that the program crashes if accidentally a tableitem is dragged and "dropped" on the table
return
width=self.comboBox_selectData.fontMetrics().boundingRect(max(url_list, key=len)).width()
self.comboBox_selectData.view().setFixedWidth(width+10)
for rowNumber in range(len(fileinfo)):#for url in l:
url = fileinfo[rowNumber]["rtdc_path"]
#add to table
rowPosition = self.table_dragdrop.rowCount()
self.table_dragdrop.insertRow(rowPosition)
columnPosition = 0
line = QtWidgets.QLabel(self.table_dragdrop)
line.setText(url)
line.setDisabled(True)
line.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, line)
# item = QtWidgets.QTableWidgetItem(url)
# item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
# print(item.textAlignment())
# item.setTextAlignment(QtCore.Qt.AlignRight) # change the alignment
# #item.setTextAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AnchorRight) # change the alignment
# self.table_dragdrop.setItem(rowPosition , columnPosition, item ) #
columnPosition = 1
spinb = QtWidgets.QSpinBox(self.table_dragdrop)
spinb.valueChanged.connect(self.dataOverviewOn)
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, spinb)
for columnPosition in range(2,4):
#for each item, also create 2 checkboxes (train/valid)
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 4
#Place a button which allows to show a plot (scatter or histogram)
btn = QtWidgets.QPushButton(self.table_dragdrop)
btn.setMinimumSize(QtCore.QSize(50, 30))
btn.setMaximumSize(QtCore.QSize(50, 30))
btn.clicked.connect(self.button_hist)
btn.setText('Plot')
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, btn)
self.table_dragdrop.resizeRowsToContents()
# columnPosition = 5
# #Place a combobox with the available features
# cb = QtWidgets.QComboBox(self.table_dragdrop)
# cb.addItems(fileinfo[rowNumber]["features"])
# cb.setMinimumSize(QtCore.QSize(70, 30))
# cb.setMaximumSize(QtCore.QSize(70, 30))
# width=cb.fontMetrics().boundingRect(max(fileinfo[rowNumber]["features"], key=len)).width()
# cb.view().setFixedWidth(width+30)
# self.table_dragdrop.setCellWidget(rowPosition, columnPosition, cb)
columnPosition = 5
#Show the number of images contained in the dataset
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, fileinfo[rowNumber]["nr_images"])
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 6
#Field to let the user define the nr. of cells/epoch
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole,100)
#item.cellChanged.connect(self.dataOverviewOn)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 7
#Pixel size
item = QtWidgets.QTableWidgetItem()
pix = float(fileinfo[rowNumber]["pix"])
#print(pix)
item.setData(QtCore.Qt.EditRole,pix)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 8
#Should data be shuffled (random?)
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Checked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 9
#Zooming factor
item = QtWidgets.QTableWidgetItem()
zoom = 1.0
item.setData(QtCore.Qt.EditRole,zoom)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 10
#Should xtra_data be used?
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
xtra_in_available = fileinfo[rowNumber]["xtra_in"]
if xtra_in_available:
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
else:
item.setFlags( QtCore.Qt.ItemIsUserCheckable )
item.setCheckState(QtCore.Qt.Unchecked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
#Functions for Keras augmentation checkboxes
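#Each keras_changed_* handler receives the Qt check-state of its checkbox:
#0 = unchecked (disable the corresponding field and reset it to a neutral value),
#2 = checked (enable the field and load the default value from Default_dict).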
def keras_changed_rotation(self,on_or_off):
if on_or_off==0:
self.lineEdit_Rotation.setText(str(0))
self.lineEdit_Rotation.setEnabled(False)
elif on_or_off==2:
self.lineEdit_Rotation.setText(str(Default_dict ["rotation"]))
self.lineEdit_Rotation.setEnabled(True)
else:
return
def keras_changed_width_shift(self,on_or_off):
if on_or_off==0:
self.lineEdit_widthShift.setText(str(0))
self.lineEdit_widthShift.setEnabled(False)
elif on_or_off==2:
self.lineEdit_widthShift.setText(str(Default_dict ["width_shift"]))
self.lineEdit_widthShift.setEnabled(True)
else:
return
def keras_changed_height_shift(self,on_or_off):
if on_or_off==0:
self.lineEdit_heightShift.setText(str(0))
self.lineEdit_heightShift.setEnabled(False)
elif on_or_off==2:
self.lineEdit_heightShift.setText(str(Default_dict ["height_shift"]))
self.lineEdit_heightShift.setEnabled(True)
else:
return
def keras_changed_zoom(self,on_or_off):
if on_or_off==0:
self.lineEdit_zoomRange.setText(str(0))
self.lineEdit_zoomRange.setEnabled(False)
elif on_or_off==2:
self.lineEdit_zoomRange.setText(str(Default_dict ["zoom"]))
self.lineEdit_zoomRange.setEnabled(True)
else:
return
def keras_changed_shear(self,on_or_off):
if on_or_off==0:
self.lineEdit_shearRange.setText(str(0))
self.lineEdit_shearRange.setEnabled(False)
elif on_or_off==2:
self.lineEdit_shearRange.setText(str(Default_dict ["shear"]))
self.lineEdit_shearRange.setEnabled(True)
else:
return
def keras_changed_brightplus(self,on_or_off):
if on_or_off==0:
self.spinBox_PlusLower.setValue(0)
self.spinBox_PlusLower.setEnabled(False)
self.spinBox_PlusUpper.setValue(0)
self.spinBox_PlusUpper.setEnabled(False)
elif on_or_off==2:
self.spinBox_PlusLower.setValue(Default_dict ["Brightness add. lower"])
self.spinBox_PlusLower.setEnabled(True)
self.spinBox_PlusUpper.setValue(Default_dict ["Brightness add. upper"])
self.spinBox_PlusUpper.setEnabled(True)
else:
return
def keras_changed_brightmult(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_MultLower.setValue(1.0)
self.doubleSpinBox_MultLower.setEnabled(False)
self.doubleSpinBox_MultUpper.setValue(1.0)
self.doubleSpinBox_MultUpper.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_MultLower.setValue(Default_dict ["Brightness mult. lower"])
self.doubleSpinBox_MultLower.setEnabled(True)
self.doubleSpinBox_MultUpper.setValue(Default_dict ["Brightness mult. upper"])
self.doubleSpinBox_MultUpper.setEnabled(True)
else:
return
def keras_changed_noiseMean(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_GaussianNoiseMean.setValue(0.0)
self.doubleSpinBox_GaussianNoiseMean.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_GaussianNoiseMean.setValue(Default_dict ["Gaussnoise Mean"])
self.doubleSpinBox_GaussianNoiseMean.setEnabled(True)
else:
return
def keras_changed_noiseScale(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_GaussianNoiseScale.setValue(0.0)
self.doubleSpinBox_GaussianNoiseScale.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_GaussianNoiseScale.setValue(Default_dict ["Gaussnoise Scale"])
self.doubleSpinBox_GaussianNoiseScale.setEnabled(True)
else:
return
def keras_changed_contrast(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_contrastLower.setEnabled(False)
self.doubleSpinBox_contrastHigher.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
else:
return
def keras_changed_saturation(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_saturationLower.setEnabled(False)
self.doubleSpinBox_saturationHigher.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_saturationLower.setEnabled(True)
self.doubleSpinBox_saturationHigher.setEnabled(True)
else:
return
def keras_changed_hue(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_hueDelta.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_hueDelta.setEnabled(True)
else:
return
def expert_mode_off(self,on_or_off):
"""
Reset all values on the expert tab to the default values, excluding the metrics
metrics are defined only once when starting fitting and should not be changed
"""
if on_or_off==0: #switch off
self.spinBox_batchSize.setValue(Default_dict["spinBox_batchSize"])
self.spinBox_epochs.setValue(1)
self.checkBox_expt_loss.setChecked(False)
self.expert_loss_off(0)
self.groupBox_learningRate.setChecked(False)
self.expert_learningrate_off(0)
self.checkBox_optimizer.setChecked(False)
self.expert_optimizer_off(0)
def expert_loss_off(self,on_or_off):
if on_or_off==0: #switch off
#switch back to categorical_crossentropy
index = self.comboBox_expt_loss.findText("categorical_crossentropy", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_expt_loss.setCurrentIndex(index)
def expert_learningrate_off(self,on_or_off):
if on_or_off==0: #switch off
#which optimizer is used? (there are different default learning-rates
#for each optimizer!)
optimizer = str(self.comboBox_optimizer.currentText())
self.doubleSpinBox_learningRate.setValue(Default_dict["doubleSpinBox_learningRate_"+optimizer])
self.radioButton_LrCycl.setChecked(False)
self.radioButton_LrExpo.setChecked(False)
self.radioButton_LrConst.setChecked(True)
def expert_optimizer_off(self,on_or_off):
if on_or_off==0: #switch off, set the optimizer back to Adam
optimizer = "Adam"
index = self.comboBox_optimizer.findText(optimizer, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_optimizer.setCurrentIndex(index)
#also reset the learning rate to the default
self.doubleSpinBox_learningRate.setValue(Default_dict["doubleSpinBox_learningRate_"+optimizer])
def expert_optimizer_changed(self,optimizer_text,listindex):
# print("optimizer_text: "+str(optimizer_text))
# print("listindex: "+str(listindex))
if optimizer_text=="":
return
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
#set the learning rate to the default for this optimizer
value_current = float(item_ui.doubleSpinBox_learningRate.value())
value_wanted = Default_dict["doubleSpinBox_learningRate_"+optimizer_text]
#insert the current value in the optimizer_settings:
item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_text.lower()] = value_current
item_ui.optimizer_settings["comboBox_optimizer"] = optimizer_text
try: #only works on the fitting-popup
text = str(item_ui.textBrowser_FittingInfo.toPlainText())
except:
text = "Epoch"
# print("text: "+str(text))
if value_current!=value_wanted and "Epoch" in text:#avoid that the message pops up when window is created
item_ui.doubleSpinBox_learningRate.setValue(value_wanted)
item_ui.doubleSpinBox_expDecInitLr.setValue(value_wanted)
#Inform user
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setWindowTitle("Learning rate to default")
msg.setText("Learning rate was set to the default for "+optimizer_text)
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
def expert_lr_changed(self,value,optimizer_text,listindex):
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_text.lower()] = value
def update_hist1(self):
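"""Plot a histogram of the feature selected in comboBox_feat1 into plt1
(update_hist2 and update_scatter are the analogous handlers for the
second feature and for the scatter plot)."""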
feature = str(self.comboBox_feat1.currentText())
feature_values = self.rtdc_ds["events"][feature]
#if len(feature_values)==len(self.rtdc_ds['area_cvx']):
# self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def update_hist2(self):
feature = str(self.comboBox_feat2.currentText())
feature_values = self.rtdc_ds["events"][feature]
#if len(feature_values)==len(self.rtdc_ds['area_cvx']):
#self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def update_scatter(self):
feature_x = str(self.comboBox_feat1.currentText())
feature_x_values = self.rtdc_ds["events"][feature_x]
feature_y = str(self.comboBox_feat2.currentText())
feature_y_values = self.rtdc_ds["events"][feature_y]
if len(feature_x_values)==len(feature_y_values):
#self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
#y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(feature_x_values, feature_y_values,pen=None,symbol='o',clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def button_hist(self,item):
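"""Open a popup for the dataset of the clicked table row: load the .rtdc
file and provide two feature comboboxes plus buttons to plot histograms
or a scatter plot of the selected features."""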
buttonClicked = self.sender()
index = self.table_dragdrop.indexAt(buttonClicked.pos())
rowPosition = index.row()
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.rtdc_ds = rtdc_ds
# feature_values = rtdc_ds[feature]
#Init a popup window
self.w = MyPopup()
self.w.setWindowTitle(rtdc_path)
self.w.setObjectName(_fromUtf8("w"))
self.gridLayout_w2 = QtWidgets.QGridLayout(self.w)
self.gridLayout_w2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_w2.setObjectName(_fromUtf8("gridLayout_w2"))
self.widget = QtWidgets.QWidget(self.w)
self.widget.setMinimumSize(QtCore.QSize(0, 65))
self.widget.setMaximumSize(QtCore.QSize(16777215, 65))
self.widget.setObjectName(_fromUtf8("widget"))
self.horizontalLayout_w3 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_w3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_w3.setObjectName(_fromUtf8("horizontalLayout_w3"))
self.verticalLayout_w = QtWidgets.QVBoxLayout()
self.verticalLayout_w.setObjectName(_fromUtf8("verticalLayout_w"))
self.horizontalLayout_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_w.setObjectName(_fromUtf8("horizontalLayout_w"))
self.comboBox_feat1 = QtWidgets.QComboBox(self.widget)
self.comboBox_feat1.setObjectName(_fromUtf8("comboBox_feat1"))
features = list(self.rtdc_ds["events"].keys())
self.comboBox_feat1.addItems(features)
self.horizontalLayout_w.addWidget(self.comboBox_feat1)
self.comboBox_feat2 = QtWidgets.QComboBox(self.widget)
self.comboBox_feat2.setObjectName(_fromUtf8("comboBox_feat2"))
self.comboBox_feat2.addItems(features)
self.horizontalLayout_w.addWidget(self.comboBox_feat2)
self.verticalLayout_w.addLayout(self.horizontalLayout_w)
self.horizontalLayout_w2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_w2.setObjectName(_fromUtf8("horizontalLayout_w2"))
self.pushButton_Hist1 = QtWidgets.QPushButton(self.widget)
self.pushButton_Hist1.setObjectName(_fromUtf8("pushButton_Hist1"))
self.horizontalLayout_w2.addWidget(self.pushButton_Hist1)
self.pushButton_Hist2 = QtWidgets.QPushButton(self.widget)
self.pushButton_Hist2.setObjectName(_fromUtf8("pushButton_Hist2"))
self.horizontalLayout_w2.addWidget(self.pushButton_Hist2)
self.verticalLayout_w.addLayout(self.horizontalLayout_w2)
self.horizontalLayout_w3.addLayout(self.verticalLayout_w)
self.verticalLayout_w2 = QtWidgets.QVBoxLayout()
self.verticalLayout_w2.setObjectName(_fromUtf8("verticalLayout_w2"))
self.pushButton_Scatter = QtWidgets.QPushButton(self.widget)
self.pushButton_Scatter.setObjectName(_fromUtf8("pushButton_Scatter"))
self.verticalLayout_w2.addWidget(self.pushButton_Scatter)
self.checkBox_ScalePix = QtWidgets.QCheckBox(self.widget)
self.checkBox_ScalePix.setObjectName(_fromUtf8("checkBox_ScalePix"))
self.verticalLayout_w2.addWidget(self.checkBox_ScalePix)
self.horizontalLayout_w3.addLayout(self.verticalLayout_w2)
self.gridLayout_w2.addWidget(self.widget, 0, 0, 1, 1)
self.pushButton_Hist1.setText("Hist")
self.pushButton_Hist1.clicked.connect(self.update_hist1)
self.pushButton_Hist2.setText("Hist")
self.pushButton_Hist2.clicked.connect(self.update_hist2)
self.pushButton_Scatter.setText("Scatter")
self.pushButton_Scatter.clicked.connect(self.update_scatter)
self.checkBox_ScalePix.setText("Scale by pix")
self.histogram = pg.GraphicsWindow()
self.plt1 = self.histogram.addPlot()
# y,x = np.histogram(feature_values, bins='auto')
# plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150))
self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
self.w.show()
def update_historyplot_pop(self,listindex):
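"""Redraw the training-history plot of fitting popup 'listindex': collect
the metrics whose checkboxes are ticked in tableWidget_HistoryInfo_pop and
plot one scatter series per selected metric against the epoch number."""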
#listindex = self.popupcounter-1 #len(self.fittingpopups_ui)-1
#After the first epoch there are checkboxes available. Check, if user checked some:
colcount = int(self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.columnCount())
#Collect items that are checked
selected_items,Colors = [],[]
for colposition in range(colcount):
#is the metric checked?
cb = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(0, colposition)
if not cb==None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
self.Colors = Colors
Histories = self.fittingpopups_ui[listindex].Histories
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only keep the last history item, because this corresponds to the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
# if len(DF1)>0:
# DF1 = pd.concat(DF1)
# else:
# return
self.fittingpopups_ui[listindex].widget_pop.clear()
#Create fresh plot
plt1 = self.fittingpopups_ui[listindex].widget_pop.addPlot()
plt1.showGrid(x=True,y=True)
plt1.addLegend()
plt1.setLabel('bottom', 'Epoch', units='')
#Create a dict that stores plots for each metric (for real time plotting)
self.fittingpopups_ui[listindex].historyscatters = dict()
for i in range(len(selected_items)):
key = selected_items[i]
df = DF1[key]
color = self.Colors[i]
pen_rollmedi = list(color.color().getRgb())
pen_rollmedi = pg.mkColor(pen_rollmedi)
pen_rollmedi = pg.mkPen(color=pen_rollmedi,width=6)
color = list(color.color().getRgb())
color[-1] = int(0.6*color[-1])
color = tuple(color)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
#print(df)
historyscatter = plt1.plot(range(len(df)), df.values, pen=None,symbol='o',symbolPen=None,symbolBrush=brush,name=key,clear=False)
#self.fittingpopups_ui[listindex].historyscatters.append(historyscatter)
self.fittingpopups_ui[listindex].historyscatters[key]=historyscatter
def stop_fitting_pop(self,listindex):
#listindex = len(self.fittingpopups_ui)-1
epochs = self.fittingpopups_ui[listindex].epoch_counter
#Stop button on the fitting popup
#Should stop the fitting process and save the metafile
#1. Change the nr. requested epochs to a smaller number
self.fittingpopups_ui[listindex].spinBox_NrEpochs.setValue(epochs-1)
#2. Check the box which will cause that the new parameters are applied at next epoch
self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.setChecked(True)
def pause_fitting_pop(self,listindex):
#Just change the text on the button
if str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())==" ":
#If the text on the button was Pause, change it to Continue
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setText("")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setStyleSheet("background-color: green")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"continue.png")))
elif str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())=="":
#If the text on the button was Continue, change it to Pause
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setText(" ")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"pause.png")))
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setStyleSheet("")
def saveTextWindow_pop(self,listindex):
#Get the entire content of textBrowser_FittingInfo
text = str(self.fittingpopups_ui[listindex].textBrowser_FittingInfo.toPlainText())
#Ask the user where to save the stuff
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Fitting info', Default_dict["Path of last model"]," (*.txt)")
filename = filename[0]
#Save to this filename
if len(filename)>0:
f = open(filename,'w')
f.write(text)
f.close()
def clearTextWindow_pop(self,listindex):
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.clear()
def showModelSumm_pop(self,listindex):
text5 = "Model summary:\n"
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text5+summary
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
def saveModelSumm_pop(self,listindex):
text5 = "Model summary:\n"
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text5+summary
#Ask the user where to save the stuff
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Model summary', Default_dict["Path of last model"]," (*.txt)")
filename = filename[0]
#Save to this filename (only if the user did not cancel the dialog)
if len(filename)>0:
f = open(filename,'w')
f.write(text)
f.close()
#class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert) #
def get_class_weight(self,SelectedFiles,lossW_expert,custom_check_classes=False):
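"""
Build the class_weight dictionary that can be passed to model.fit().
lossW_expert is either "None"/"" (no weighting), "Balanced" (weights are
computed from the training files in SelectedFiles as max(count)/count per
class), or a string containing a Python dict with custom weights. With
custom_check_classes=True, custom weights are checked against the classes
actually present; on mismatch, balanced weights are returned as
["Balanced",class_weights].
Illustrative (hypothetical) counts: {0:1000, 1:250} -> weights {0:1.0, 1:4.0}.
"""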
t1 = time.time()
print("Getting dictionary for class_weight")
if lossW_expert=="None":
return None
elif lossW_expert=="":
return None
elif lossW_expert=="Balanced":
#Which are training files?
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = list(np.array(SelectedFiles)[ind])
classes = [int(selectedfile["class"]) for selectedfile in SelectedFiles_train]
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train]
classes_uni = np.unique(classes)
counter = {}
for class_ in classes_uni:
ind = np.where(np.array(classes)==class_)[0]
nr_events_epoch_class = np.array(nr_events_epoch)[ind]
counter[class_] = np.sum(nr_events_epoch_class)
max_val = float(max(counter.values()))
return {class_id : max_val/num_images for class_id, num_images in counter.items()}
elif lossW_expert.startswith("{"):#Custom loss weights
class_weights = eval(lossW_expert)
if custom_check_classes:#Check that each element in classes_uni is contained in class_weights.keys()
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = list(np.array(SelectedFiles)[ind])
classes = [int(selectedfile["class"]) for selectedfile in SelectedFiles_train]
classes_uni = np.unique(classes)
classes_uni = np.sort(classes_uni)
class_weights_keys = np.sort([int(a) for a in class_weights.keys()])
#each element in classes_uni has to be equal to class_weights_keys
equal = np.array_equal(classes_uni,class_weights_keys)
if equal == True:
return class_weights
else:
#The custom class_weights do not match the classes present in the training data;
#run the function again, but request 'Balanced' weights instead
class_weights = self.get_class_weight(SelectedFiles,"Balanced")
return ["Balanced",class_weights]
else:
return class_weights
t2 = time.time()
dt = np.round(t2-t1,2)
print("Comp. time = "+str(dt))
def accept_lr_range(self):
lr_start = str(self.popup_lrfinder_ui.lineEdit_LrMin.text())
lr_stop = str(self.popup_lrfinder_ui.lineEdit_LrMax.text())
if len(lr_start)>0 and len(lr_stop)>0:
self.lineEdit_cycLrMin.setText(lr_start)
self.lineEdit_cycLrMax.setText(lr_stop)
else:
print("Found no values for LR range")
def accept_lr_value(self):
single_lr = self.popup_lrfinder_ui.lineEdit_singleLr.text()
if len(single_lr)>0:
lr_value = float(single_lr)
self.doubleSpinBox_learningRate.setValue(lr_value)
self.doubleSpinBox_expDecInitLr.setValue(lr_value)
else:
print("Found no value for single LR!")
def reset_lr_settings(self):
self.popup_lrfinder_ui.lineEdit_startLr.setText(_translate("Form_LrFinder", "1e-10", None))
self.popup_lrfinder_ui.lineEdit_stopLr.setText(_translate("Form_LrFinder", "0.1", None))
self.popup_lrfinder_ui.doubleSpinBox_percDataT.setProperty("value", 100.0)
self.popup_lrfinder_ui.doubleSpinBox_percDataV.setProperty("value", 100.0)
self.popup_lrfinder_ui.spinBox_batchSize.setValue(Default_dict["spinBox_batchSize"])
self.popup_lrfinder_ui.spinBox_lineWidth.setProperty("value", 6)
self.popup_lrfinder_ui.spinBox_epochs.setProperty("value", 5)
def reset_lr_value(self):
self.popup_lrfinder_ui.lineEdit_singleLr.setText("")
#Uncheck and Check the groupbox to refresh the line
self.popup_lrfinder_ui.groupBox_singleLr.setChecked(False)
self.popup_lrfinder_ui.groupBox_singleLr.setChecked(True)
def reset_lr_range(self):
self.popup_lrfinder_ui.lineEdit_LrMin.setText("")
self.popup_lrfinder_ui.lineEdit_LrMax.setText("")
#Uncheck and Check the groupbox to refresh the range
self.popup_lrfinder_ui.groupBox_LrRange.setChecked(False)
self.popup_lrfinder_ui.groupBox_LrRange.setChecked(True)
def popup_lr_finder(self):
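"""Open the learning-rate finder popup, prefill it with the currently
selected model, loss, optimizer and batch size, and connect its buttons
to the LR-range/LR-value accept and reset functions."""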
SelectedFiles = self.items_clicked()
self.popup_lrfinder = MyPopup()
self.popup_lrfinder_ui = aid_frontend.popup_lrfinder()
self.popup_lrfinder_ui.setupUi(self.popup_lrfinder) #open a popup for lr finder
#Get information about the model
#check which radiobutton is checked and copy the model name from there
if self.radioButton_NewModel.isChecked():
modelname = str(self.comboBox_ModelSelection.currentText())
if modelname==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
elif self.radioButton_LoadContinueModel.isChecked():
modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadRestartModel.isChecked():
modelname = str(self.lineEdit_LoadModelPath.text())
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please specify a model using the radiobuttons on the 'Define Model' -tab")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
in_dim = int(self.spinBox_imagecrop.value())
#Put information onto UI
self.popup_lrfinder_ui.lineEdit_loadModel.setText(modelname)
self.popup_lrfinder_ui.spinBox_Crop_inpImgSize.setValue(in_dim)
color_mode = self.get_color_mode()
self.popup_lrfinder_ui.comboBox_colorMode.addItem(color_mode)
loss_str = str(self.comboBox_expt_loss.currentText())
self.popup_lrfinder_ui.comboBox_expt_loss.addItem(loss_str)
optimizer_str = str(self.comboBox_optimizer.currentText())
self.popup_lrfinder_ui.comboBox_optimizer.addItem(optimizer_str)
batch_size = self.spinBox_batchSize.value()
self.popup_lrfinder_ui.spinBox_batchSize.setValue(batch_size)
#Connect action_lr_finder function to button
self.popup_lrfinder_ui.pushButton_LrFindRun.clicked.connect(lambda: self.action_initialize_model(duties="initialize_lrfind"))
self.popup_lrfinder_ui.pushButton_rangeAccept.clicked.connect(self.accept_lr_range)
self.popup_lrfinder_ui.pushButton_singleAccept.clicked.connect(self.accept_lr_value)
self.popup_lrfinder_ui.pushButton_LrReset.clicked.connect(self.reset_lr_settings)
self.popup_lrfinder_ui.pushButton_singleReset.clicked.connect(self.reset_lr_value)
self.popup_lrfinder_ui.pushButton_rangeReset.clicked.connect(self.reset_lr_range)
#Update the plot when any plotting option is changed
self.popup_lrfinder_ui.comboBox_metric.currentIndexChanged.connect(self.update_lrfind_plot)
self.popup_lrfinder_ui.spinBox_lineWidth.valueChanged.connect(self.update_lrfind_plot)
self.popup_lrfinder_ui.checkBox_smooth.toggled.connect(self.update_lrfind_plot)
#LR single value when groupbox is toggled
self.popup_lrfinder_ui.groupBox_singleLr.toggled.connect(self.get_lr_single)
#LR range when groupbox is toggled
self.popup_lrfinder_ui.groupBox_LrRange.toggled.connect(self.get_lr_range)
#compute the number of steps/epoch
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
def update_stepsPerEpoch():
batch_size = self.popup_lrfinder_ui.spinBox_batchSize.value()
perc_data = self.popup_lrfinder_ui.doubleSpinBox_percDataT.value()
nr_events = (perc_data/100)*nr_events_train_total
stepsPerEpoch = np.ceil(nr_events / float(batch_size))
self.popup_lrfinder_ui.spinBox_stepsPerEpoch.setValue(stepsPerEpoch)
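#Example (hypothetical numbers): 10000 training events at 100% of the data
#and a batch size of 32 give ceil(10000/32) = 313 steps per epoch.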
update_stepsPerEpoch()
self.popup_lrfinder_ui.spinBox_batchSize.valueChanged.connect(update_stepsPerEpoch)
self.popup_lrfinder_ui.doubleSpinBox_percDataT.valueChanged.connect(update_stepsPerEpoch)
self.popup_lrfinder.show()
def popup_clr_settings(self,listindex):
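"""Open a small popup to edit the cyclical-learning-rate settings
(step_size and gamma) of the main window (listindex==-1) or of a fitting
popup (listindex>=0)."""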
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_clrsettings = MyPopup()
item_ui.popup_clrsettings_ui = aid_frontend.Ui_Clr_settings()
item_ui.popup_clrsettings_ui.setupUi(item_ui.popup_clrsettings) #open a popup for lr plotting
##Manual insertion##
item_ui.popup_clrsettings_ui.spinBox_stepSize.setProperty("value", item_ui.clr_settings["step_size"])
item_ui.popup_clrsettings_ui.doubleSpinBox_gamma.setProperty("value", item_ui.clr_settings["gamma"])
def clr_settings_ok():
step_size = int(item_ui.popup_clrsettings_ui.spinBox_stepSize.value())
gamma = float(item_ui.popup_clrsettings_ui.doubleSpinBox_gamma.value())
item_ui.clr_settings["step_size"] = step_size #Number of epochs to fulfill half a cycle
item_ui.clr_settings["gamma"] = gamma #gamma factor for Exponential decrease method (exp_range)
print("Settings for cyclical learning rates were changed.")
#close the popup
item_ui.popup_clrsettings = None
item_ui.popup_clrsettings_ui = None
def clr_settings_cancel():#close the popup
item_ui.popup_clrsettings = None
item_ui.popup_clrsettings_ui = None
item_ui.popup_clrsettings_ui.pushButton_ok.clicked.connect(clr_settings_ok)
item_ui.popup_clrsettings_ui.pushButton_cancel.clicked.connect(clr_settings_cancel)
item_ui.popup_clrsettings.show()
def popup_lr_plot(self,listindex):
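"""Open a popup that plots the learning-rate schedule (constant, cyclical
or exponential decay) over the requested number of epochs, based on the
current settings and the number of training images."""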
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_lrplot = MyPopup()
item_ui.popup_lrplot_ui = aid_frontend.popup_lrplot()
item_ui.popup_lrplot_ui.setupUi(item_ui.popup_lrplot) #open a popup for lr plotting
#compute total number of epochs that will be fitted
spinBox_NrEpochs = item_ui.spinBox_NrEpochs.value() #my own loop
spinBox_epochs = item_ui.spinBox_epochs.value() #inside model.fit()
nr_epochs = spinBox_NrEpochs*spinBox_epochs
item_ui.popup_lrplot_ui.spinBox_totalEpochs.setValue(nr_epochs)
#Get the number of training examples
SelectedFiles = self.items_clicked()
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
if nr_events_train_total==0 and item_ui.radioButton_LrConst.isChecked()==False:
#for Cyclical learning rates and Exponential learning rates, the
#number of training images is needed
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no training data. Nr. of training images is required for this plot.")
msg.setWindowTitle("Nr. of training images = 0")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
text_info = ""
if item_ui.radioButton_LrConst.isChecked():
text_info+="Constant learning rate\n"
epochs_plot = np.array(range(nr_epochs))
const_lr = float(self.doubleSpinBox_learningRate.value())
learningrates = np.repeat(const_lr,nr_epochs)
elif item_ui.radioButton_LrCycl.isChecked():
text_info+="Cyclical learning rates\n"
base_lr = float(item_ui.lineEdit_cycLrMin.text())
max_lr = float(item_ui.lineEdit_cycLrMax.text())
batch_size = int(item_ui.spinBox_batchSize.value())
step_size = item_ui.clr_settings["step_size"] #number of epochs per half cycle
step_size_ = step_size*int(np.round(nr_events_train_total / batch_size))#number of batch updates per half cycle
mode = str(item_ui.comboBox_cycLrMethod.currentText())
clr_iterations = nr_epochs*int(np.round(nr_events_train_total / batch_size))#total number of batch updates
nr_cycles = (clr_iterations/step_size_)/2.0#number of cycles
gamma = item_ui.clr_settings["gamma"] #gamma factor for the exp_range
#Generate text to display the settings used
text_info+="Nr. of training images: "+str(nr_events_train_total)+"\n"
text_info+="base_lr: "+str(base_lr)+"\n"
text_info+="max_lr: "+str(max_lr)+"\n"
text_info+="batch_size: "+str(batch_size)+"\n"
text_info+="mode: "+str(mode)+"\n"
text_info+="gamma: "+str(gamma)+"\n"
text_info+="Nr. of epochs to fulfill one cycle: "+str(2*step_size)+"\n"
#text_info+="Total nr. of lr adjustmend: "+str(step_size_)+"\n"
text_info+="Total nr. of lr adjustments: "+str(clr_iterations)+"\n"
text_info+="Total nr. of cycles: "+str(nr_cycles)+"\n"
#Request the learning rates from the class cyclicLR
clr_iterations = np.arange(clr_iterations)
clr_1 = aid_dl.cyclicLR(base_lr=base_lr,max_lr=max_lr,step_size=step_size_,mode=mode,gamma=gamma)
clr_1.clr_iterations=clr_iterations#pass the number of clr iterations to the class
learningrates = clr_1.clr() #compute the learning rates for each iteration
#convert clr_iterations back to "epochs"
epochs_plot = clr_iterations/int(np.round(nr_events_train_total / batch_size))
elif item_ui.radioButton_LrExpo.isChecked():
text_info+="Exponentially decreased learning rates\n"
initial_lr = float(item_ui.doubleSpinBox_expDecInitLr.value())
decay_steps = int(item_ui.spinBox_expDecSteps.value())
decay_rate = float(item_ui.doubleSpinBox_expDecRate.value())
batch_size = int(item_ui.spinBox_batchSize.value())
text_info+="Nr. of training images: "+str(nr_events_train_total)+"\n"
text_info+="initial_lr: "+str(initial_lr)+"\n"
text_info+="decay_steps: "+str(decay_steps)+"\n"
text_info+="decay_rate: "+str(decay_rate)+"\n"
#epochs_plot = np.array(range(nr_epochs))
epochs_plot = nr_epochs * int(np.round(nr_events_train_total / batch_size))
epochs_plot = np.arange(epochs_plot)
exp_decay = aid_dl.exponentialDecay(initial_lr=initial_lr, decay_steps=decay_steps, decay_rate=decay_rate)
exp_decay.iterations=epochs_plot#pass the iterations to the class
learningrates = exp_decay.exp_decay()
epochs_plot = epochs_plot/int(np.round(nr_events_train_total / batch_size))
#learningrates = aid_dl.exponentialDecay(epochs_plot,initial_lr=initial_lr, decay_steps=decay_steps, decay_rate=decay_rate)
def refreshPlot():
try: # try to empty the plot
item_ui.popup_lrplot_ui.lr_plot.removeItem(item_ui.lr_line2)
except:
pass
#Get design settings
color = item_ui.popup_lrplot_ui.pushButton_color.palette().button().color()
width = int(item_ui.popup_lrplot_ui.spinBox_lineWidth.value())
color = list(color.getRgb())
color = tuple(color)
pencolor=pg.mkPen(color, width=width)
#define curve and add to plot
item_ui.lr_line2 = pg.PlotCurveItem(x=epochs_plot, y=learningrates,pen=pencolor)
item_ui.popup_lrplot_ui.lr_plot.addItem(item_ui.lr_line2)
refreshPlot()
item_ui.popup_lrplot_ui.pushButton_refreshPlot.clicked.connect(refreshPlot)
item_ui.popup_lrplot_ui.textBrowser_lrSettings.setText(text_info)
item_ui.popup_lrplot.show()
def lossWeights_activated(self,on_or_off,listindex):
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
if on_or_off==False:#0 means switched OFF
item_ui.lineEdit_lossW.setText("")
item_ui.pushButton_lossW.setEnabled(False)
#this happens when the user activated the expert option "loss weights"
elif on_or_off==True:#2 means switched ON
#Activate button
item_ui.pushButton_lossW.setEnabled(True)
self.lossWeights_popup(listindex)
def lossWeights_popup(self,listindex):
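"""Open a popup with one table row per class, showing the total number of
events, the events per epoch (absolute and relative) and a spinbox for the
loss weight of each class."""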
if listindex==-1:
item_ui = self
SelectedFiles = self.items_clicked()
else:
item_ui = self.fittingpopups_ui[listindex]
SelectedFiles = item_ui.SelectedFiles
item_ui.popup_lossW = MyPopup()
item_ui.popup_lossW_ui = aid_frontend.popup_lossweights()
item_ui.popup_lossW_ui.setupUi(item_ui.popup_lossW) #open a popup to show the numbers of events in each class in a table
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
#Initiate the table with 5 columns: ["Class","Events tot.","Events/Epoch","Events/Epoch[%]","Loss weight"]
item_ui.popup_lossW_ui.tableWidget_lossW.setColumnCount(5)
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = nr_ind
item_ui.popup_lossW_ui.tableWidget_lossW.setRowCount(nr_rows)
#Define the column headers
header_labels = ["Class", "Events tot." ,"Events/Epoch", "Events/Epoch[%]", "Loss weight"]
item_ui.popup_lossW_ui.tableWidget_lossW.setHorizontalHeaderLabels(header_labels)
header = item_ui.popup_lossW_ui.tableWidget_lossW.horizontalHeader()
for i in range(len(header_labels)):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
#Fill the table
rowPosition = 0
#Training info
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
#Total nr of cells for each index
for index in np.unique(indices_train):
colPos = 0 #"Class" #put the index (class!) in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole,str(index))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
#Get the training files of that index
ind = np.where(indices_train==index)[0]
SelectedFiles_train_index = np.array(SelectedFiles_train)[ind]
colPos = 1 #"Events tot."
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 2 #"Events/Epoch"
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 3 #"Events/Epoch[%]"
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.round(np.sum(nr_events_epoch)/float(nr_events_train_total),2)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 4 #"Loss weights"
#for each class create a spinbox for the loss weight
spinb = QtWidgets.QDoubleSpinBox(item_ui.popup_lossW_ui.tableWidget_lossW)
spinb.setEnabled(False)
spinb.setMinimum(-99999)
spinb.setMaximum(99999)
spinb.setSingleStep(0.1)
spinb.setValue(1.0) #Default in Keras is "None", which means class_weight=1.0
item_ui.popup_lossW_ui.tableWidget_lossW.setCellWidget(rowPosition, colPos, spinb)
rowPosition += 1
item_ui.popup_lossW_ui.tableWidget_lossW.resizeColumnsToContents()
item_ui.popup_lossW_ui.tableWidget_lossW.resizeRowsToContents()
item_ui.popup_lossW.show()
item_ui.popup_lossW_ui.pushButton_pop_lossW_cancel.clicked.connect(lambda: self.lossW_cancel(listindex))
item_ui.popup_lossW_ui.pushButton_pop_lossW_ok.clicked.connect(lambda: self.lossW_ok(listindex))
item_ui.popup_lossW_ui.comboBox_lossW.currentIndexChanged.connect(lambda on_or_off: self.lossW_comboB(on_or_off,listindex))
def optimizer_change_settings_popup(self,listindex):
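"""Open the advanced optimizer-settings popup, prefill it from
item_ui.optimizer_settings and write the values back when OK is clicked."""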
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_optim = MyPopup()
item_ui.popup_optim_ui = aid_frontend.Ui_Form_expt_optim()
item_ui.popup_optim_ui.setupUi(item_ui.popup_optim) #open a popup to show advances settings for optimizer
##Manual insertion##
optimizer_name = item_ui.optimizer_settings["comboBox_optimizer"].lower()
if optimizer_name=='sgd':
item_ui.popup_optim_ui.radioButton_sgd.setChecked(True)
elif optimizer_name=='rmsprop':
item_ui.popup_optim_ui.radioButton_rms.setChecked(True)
elif optimizer_name=='adagrad':
item_ui.popup_optim_ui.radioButton_adagrad.setChecked(True)
elif optimizer_name=='adadelta':
item_ui.popup_optim_ui.radioButton_adadelta.setChecked(True)
elif optimizer_name=='adam':
item_ui.popup_optim_ui.radioButton_adam.setChecked(True)
elif optimizer_name=='adamax':
item_ui.popup_optim_ui.radioButton_adamax.setChecked(True)
elif optimizer_name=='nadam':
item_ui.popup_optim_ui.radioButton_nadam.setChecked(True)
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_sgd"])
item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.setValue(item_ui.optimizer_settings["doubleSpinBox_sgd_momentum"])
item_ui.popup_optim_ui.checkBox_sgd_nesterov.setChecked(item_ui.optimizer_settings["checkBox_sgd_nesterov"])
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_rmsprop"])
item_ui.popup_optim_ui.doubleSpinBox_rms_rho.setValue(item_ui.optimizer_settings["doubleSpinBox_rms_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adam"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_adam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_adam_beta2"])
item_ui.popup_optim_ui.checkBox_adam_amsgrad.setChecked(item_ui.optimizer_settings["checkBox_adam_amsgrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_nadam"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_nadam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_nadam_beta2"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adadelta"])
item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.setValue(item_ui.optimizer_settings["doubleSpinBox_adadelta_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adagrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adamax"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_adamax_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_adamax_beta2"])
def change_lr(lr):
item_ui.doubleSpinBox_learningRate.setValue(lr)
item_ui.doubleSpinBox_expDecInitLr.setValue(lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.valueChanged.connect(change_lr)
def change_optimizer(optimizer_name):
index = item_ui.comboBox_optimizer.findText(optimizer_name, QtCore.Qt.MatchFixedString)
if index >= 0:
item_ui.comboBox_optimizer.setCurrentIndex(index)
#get the learning rate for that optimizer
lr = item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_name.lower()]
change_lr(lr)
item_ui.popup_optim_ui.radioButton_adam.toggled.connect(lambda: change_optimizer("Adam"))
item_ui.popup_optim_ui.radioButton_sgd.toggled.connect(lambda: change_optimizer("SGD"))
item_ui.popup_optim_ui.radioButton_rms.toggled.connect(lambda: change_optimizer("RMSprop"))
item_ui.popup_optim_ui.radioButton_adagrad.toggled.connect(lambda: change_optimizer("Adagrad"))
item_ui.popup_optim_ui.radioButton_adadelta.toggled.connect(lambda: change_optimizer("Adadelta"))
item_ui.popup_optim_ui.radioButton_adamax.toggled.connect(lambda: change_optimizer("Adamax"))
item_ui.popup_optim_ui.radioButton_nadam.toggled.connect(lambda: change_optimizer("Nadam"))
def ok():
doubleSpinBox_lr_sgd = float(item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.value())
doubleSpinBox_sgd_momentum = float(item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.value())
checkBox_sgd_nesterov = bool(item_ui.popup_optim_ui.checkBox_sgd_nesterov.isChecked())
doubleSpinBox_lr_rmsprop = float(item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.value())
doubleSpinBox_rms_rho = float(item_ui.popup_optim_ui.doubleSpinBox_rms_rho.value())
doubleSpinBox_lr_adam = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adam.value())
doubleSpinBox_adam_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.value())
doubleSpinBox_adam_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.value())
checkBox_adam_amsgrad = bool(item_ui.popup_optim_ui.checkBox_adam_amsgrad.isChecked())
doubleSpinBox_lr_adadelta = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.value())
doubleSpinBox_adadelta_rho = float(item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.value())
doubleSpinBox_lr_nadam = float(item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.value())
doubleSpinBox_nadam_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.value())
doubleSpinBox_nadam_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.value())
doubleSpinBox_lr_adagrad = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.value())
doubleSpinBox_lr_adamax = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.value())
doubleSpinBox_adamax_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.value())
doubleSpinBox_adamax_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.value())
item_ui.optimizer_settings["doubleSpinBox_lr_sgd"] = doubleSpinBox_lr_sgd
item_ui.optimizer_settings["doubleSpinBox_sgd_momentum"] = doubleSpinBox_sgd_momentum
item_ui.optimizer_settings["checkBox_sgd_nesterov"] = checkBox_sgd_nesterov
item_ui.optimizer_settings["doubleSpinBox_lr_rmsprop"] = doubleSpinBox_lr_rmsprop
item_ui.optimizer_settings["doubleSpinBox_rms_rho"] = doubleSpinBox_rms_rho
item_ui.optimizer_settings["doubleSpinBox_lr_adam"] = doubleSpinBox_lr_adam
item_ui.optimizer_settings["doubleSpinBox_adam_beta1"] = doubleSpinBox_adam_beta1
item_ui.optimizer_settings["doubleSpinBox_adam_beta2"] = doubleSpinBox_adam_beta2
item_ui.optimizer_settings["checkBox_adam_amsgrad"] = checkBox_adam_amsgrad
item_ui.optimizer_settings["doubleSpinBox_lr_adadelta"] = doubleSpinBox_lr_adadelta
item_ui.optimizer_settings["doubleSpinBox_adadelta_rho"] = doubleSpinBox_adadelta_rho
item_ui.optimizer_settings["doubleSpinBox_lr_nadam"] = doubleSpinBox_lr_nadam
item_ui.optimizer_settings["doubleSpinBox_nadam_beta1"] = doubleSpinBox_nadam_beta1
item_ui.optimizer_settings["doubleSpinBox_nadam_beta2"] = doubleSpinBox_nadam_beta2
item_ui.optimizer_settings["doubleSpinBox_lr_adagrad"] = doubleSpinBox_lr_adagrad
item_ui.optimizer_settings["doubleSpinBox_lr_adamax"] = doubleSpinBox_lr_adamax
item_ui.optimizer_settings["doubleSpinBox_adamax_beta1"] = doubleSpinBox_adamax_beta1
item_ui.optimizer_settings["doubleSpinBox_adamax_beta2"] = doubleSpinBox_adamax_beta2
#close the popup
item_ui.popup_optim = None
item_ui.popup_optim_ui = None
print("Advanced settings for optimizer were changed.")
def cancel():#close the popup
item_ui.popup_optim = None
item_ui.popup_optim_ui = None
def reset():
print("Reset optimizer settings (in UI). To accept, click OK")
optimizer_default = aid_dl.get_optimizer_settings()
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.setValue(optimizer_default["doubleSpinBox_lr_sgd"])
item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.setValue(optimizer_default["doubleSpinBox_sgd_momentum"])
item_ui.popup_optim_ui.checkBox_sgd_nesterov.setChecked(optimizer_default["checkBox_sgd_nesterov"])
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.setValue(optimizer_default["doubleSpinBox_lr_rmsprop"])
item_ui.popup_optim_ui.doubleSpinBox_rms_rho.setValue(optimizer_default["doubleSpinBox_rms_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.setValue(optimizer_default["doubleSpinBox_lr_adam"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.setValue(optimizer_default["doubleSpinBox_adam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.setValue(optimizer_default["doubleSpinBox_adam_beta2"])
item_ui.popup_optim_ui.checkBox_adam_amsgrad.setChecked(optimizer_default["checkBox_adam_amsgrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.setValue(optimizer_default["doubleSpinBox_lr_nadam"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.setValue(optimizer_default["doubleSpinBox_nadam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.setValue(optimizer_default["doubleSpinBox_nadam_beta2"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.setValue(optimizer_default["doubleSpinBox_lr_adadelta"])
item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.setValue(optimizer_default["doubleSpinBox_adadelta_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.setValue(optimizer_default["doubleSpinBox_lr_adagrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.setValue(optimizer_default["doubleSpinBox_lr_adamax"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.setValue(optimizer_default["doubleSpinBox_adamax_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.setValue(optimizer_default["doubleSpinBox_adamax_beta2"])
item_ui.popup_optim_ui.pushButton_ok.clicked.connect(ok)
item_ui.popup_optim_ui.pushButton_cancel.clicked.connect(cancel)
item_ui.popup_optim_ui.pushButton_reset.clicked.connect(reset)
item_ui.popup_optim.show()
def onLayoutChange(self,app):
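"""Switch between the Normal, Dark and DarkOrange layouts and store the
chosen layout in Default_dict."""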
#Get the text of the triggered layout
layout_trig = (self.sender().text()).split(" layout")[0]
layout_current = Default_dict["Layout"]
if layout_trig == layout_current:
self.statusbar.showMessage(layout_current+" layout is already in use",2000)
return
elif layout_trig == "Normal":
#Change Layout in Defaultdict to "Normal", such that next start will use Normal layout
Default_dict["Layout"] = "Normal"
app.setStyleSheet("")
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
elif layout_trig == "Dark":
#Change Layout in Defaultdict to "Dark", such that next start will use Dark layout
Default_dict["Layout"] = "Dark"
dir_layout = os.path.join(dir_root,"layout_dark.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
elif layout_trig == "DarkOrange":
#Change Layout in Defaultdict to "Dark", such that next start will use Dark layout
Default_dict["Layout"] = "DarkOrange"
dir_layout = os.path.join(dir_root,"layout_darkorange.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/nphase/qt-ping-grapher/blob/master/resources/darkorange.stylesheet
f = f.read()
app.setStyleSheet(f)
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
#Save the layout to Default_dict
with open(dir_settings, 'w') as f:
json.dump(Default_dict,f)
def onTooltipOnOff(self,app):
#what is the current layout?
if bool(self.actionLayout_Normal.isChecked())==True: #use normal layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
app.setStyleSheet("")
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
app.setStyleSheet("""QToolTip {
opacity: 0
}""")
elif bool(self.actionLayout_Dark.isChecked())==True: #use dark layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
dir_layout = os.path.join(dir_root,"layout_dark.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
dir_layout = os.path.join(dir_root,"layout_dark_notooltip.txt")#dir to settings
f = open(dir_layout, "r")#I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionLayout_DarkOrange.isChecked())==True: #use darkorange layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
dir_layout = os.path.join(dir_root,"layout_darkorange.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/nphase/qt-ping-grapher/blob/master/resources/darkorange.stylesheet
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
dir_layout = os.path.join(dir_root,"layout_darkorange_notooltip.txt")#dir to settings
f = open(dir_layout, "r")
f = f.read()
app.setStyleSheet(f)
def onIconThemeChange(self):
#Get the text of the triggered icon theme
icontheme_trig = self.sender().text()
icontheme_current = Default_dict["Icon theme"]
if icontheme_trig == icontheme_current:
self.statusbar.showMessage(icontheme_current+" is already in use",2000)
return
elif icontheme_trig == "Icon theme 1":
Default_dict["Icon theme"] = "Icon theme 1"
self.statusbar.showMessage("Icon theme 1 will be used after restart",2000)
elif icontheme_trig == "Icon theme 2":
Default_dict["Icon theme"] = "Icon theme 2"
self.statusbar.showMessage("Icon theme 2 will be used after restart",2000)
#Save the updated Default_dict (including the new icon theme) to the settings file
with open(dir_settings, 'w') as f:
json.dump(Default_dict,f)
def items_clicked(self):
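"""
Collect all datasets that are checked for Train or Valid on table_dragdrop
and load the corresponding .rtdc files. Returns a list of dictionaries, one
per checked dataset, with the keys "rtdc_ds","rtdc_path","features",
"nr_images","class","TrainOrValid","nr_events","nr_events_epoch","shuffle",
"zoom_factor","hash" and "xtra_in".
"""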
#This function checks which datasets have been checked on table_dragdrop and returns the necessary data
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#is it checked for train?
cb_t = self.table_dragdrop.item(rowPosition, 2)
#How many events does the dataset contain in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
if cb_t.checkState() == QtCore.Qt.Checked and nr_events_epoch>0: #add to training files if the user wants more than 0 images per epoch
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
hash_ = aid_bin.hashfunction(rtdc_path)#rtdc_ds.hash
features = list(rtdc_ds["events"].keys())
nr_images = rtdc_ds["events"]["image"].len()
SelectedFiles.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"hash":hash_,"xtra_in":xtra_in})
cb_v = self.table_dragdrop.item(rowPosition, 3)
if cb_v.checkState() == QtCore.Qt.Checked and nr_events_epoch>0:
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
hash_ = aid_bin.hashfunction(rtdc_path)
features = list(rtdc_ds["events"].keys())
nr_images = rtdc_ds["events"]["image"].len()
SelectedFiles.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"hash":hash_,"xtra_in":xtra_in})
return SelectedFiles
def items_available(self):
"""
Function grabs all information from table_dragdrop. Checked and Unchecked
Does not load rtdc_ds (save time)
"""
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#How many events does the dataset contain in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"NotSpecified","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
return SelectedFiles
def items_clicked_no_rtdc_ds(self):
#This function checks which datasets have been checked on table_dragdrop and returns the necessary data
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#How many events does the dataset contain in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
#is it checked for train?
cb_t = self.table_dragdrop.item(rowPosition, 2)
if cb_t.checkState() == QtCore.Qt.Checked and nr_events_epoch>0: #add to training files if the user wants more than 0 images per epoch
#SelectedFiles.append({"nr_images":nr_events,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch})
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
cb_v = self.table_dragdrop.item(rowPosition, 3)
if cb_v.checkState() == QtCore.Qt.Checked and nr_events_epoch>0:
#SelectedFiles.append({"nr_images":nr_events,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch})
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
return SelectedFiles
def uncheck_if_zero(self,item):
#If the Nr. of epochs is changed to zero:
#uncheck the dataset for train/valid
row = item.row()
col = item.column()
#if the user changed Nr. of cells per epoch to zero
if col==6 and int(item.text())==0:
#get the checkstate of the corresponding T/V
cb_t = self.table_dragdrop.item(row, 2)
if cb_t.checkState() == QtCore.Qt.Checked:
cb_t.setCheckState(QtCore.Qt.Unchecked)
cb_v = self.table_dragdrop.item(row, 3)
if cb_v.checkState() == QtCore.Qt.Checked:
cb_v.setCheckState(QtCore.Qt.Unchecked)
def item_click(self,item):
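"""
Handle single clicks on table_dragdrop. If the shuffle checkbox (column 8)
was toggled, the "Events/Epoch" cell (column 6) is either fixed to the total
number of images (shuffle off) or re-enabled for editing (shuffle on).
Cached RAM data, the validation set and the metrics are reset because the
dataset selection changed.
"""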
colPosition = item.column()
rowPosition = item.row()
#if Shuffle was clicked (col=8), check if this checkbox is not deactivated
if colPosition==8:
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==False:
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_images = rtdc_ds["events"]["image"].len()
columnPosition = 6
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, nr_images)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==True:
#Inspect this table item. If shuffle was checked before, the cell was grayed out; re-enable it now
item = self.table_dragdrop.item(rowPosition, 6)
item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
if len(self.ram)>0:
self.statusbar.showMessage("Make sure to update RAM (->Edit->Data to RAM now) after changing Data-set",2000)
self.ram = dict() #clear the ram, since the data was changed
self.dataOverviewOn()
#When data is clicked, always reset the validation set (only important for 'Assess Model'-tab)
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
def dataOverviewOn(self):
if self.groupBox_DataOverview.isChecked()==True:
if self.threadpool_single_queue == 0:
SelectedFiles = self.items_clicked_no_rtdc_ds()
self.update_data_overview(SelectedFiles)
self.update_data_overview_2(SelectedFiles)
def dataOverviewOn_OnChange(self,item):
#When a value is entered in Events/Epoch and Enter is pressed,
#no table update is triggered automatically; do it here
if self.groupBox_DataOverview.isChecked()==True:
if self.threadpool_single_queue == 0:
rowPosition = item.row()
colPosition = item.column()
if colPosition==6:#when using the spinbox (Class) or entering a new number in "Events/Epoch", the table is not updated automatically
#get the new value
nr_cells = self.table_dragdrop.cellWidget(rowPosition, colPosition)
if nr_cells is None:
return
else:
SelectedFiles = self.items_clicked_no_rtdc_ds()
self.update_data_overview(SelectedFiles)
self.update_data_overview_2(SelectedFiles)
def update_data_overview(self,SelectedFiles):
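"""
Rebuild tableWidget_Info: one block of rows for training data and one for
validation data, listing for each class the total number of events, the
events per epoch and the (possibly user-defined) class name.
"""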
#Check if there are custom class names (determined by user)
rows = self.tableWidget_Info.rowCount()
self.classes_custom = [] #by default assume there are no custom classes
classes_custom_bool = False
if rows>0:#if >0, then there is already a table existing
classes,self.classes_custom = [],[]
for row in range(rows):
try:
class_ = self.tableWidget_Info.item(row,0).text()
if class_.isdigit():
classes.append(class_)#get the classes
except:
pass
try:
self.classes_custom.append(self.tableWidget_Info.item(row,3).text())#get the classes
except:
pass
classes = np.unique(classes)
if len(classes)==len(self.classes_custom):#equal in length
same = [i for i, j in zip(classes, self.classes_custom) if i == j] #which items are identical?
if len(same)==0:
#apparently there are custom classes! Save them
classes_custom_bool = True
if len(SelectedFiles)==0:#reset the table
#Table1
#Prepare a table in tableWidget_Info
self.tableWidget_Info.setColumnCount(0)
self.tableWidget_Info.setRowCount(0)
self.tableWidget_Info.setColumnCount(4)
header = self.tableWidget_Info.horizontalHeader()
header_labels = ["Class","Events tot.","Events/Epoch","Name"]
self.tableWidget_Info.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
return
#Prepare a table in tableWidget_Info
self.tableWidget_Info.setColumnCount(0)
self.tableWidget_Info.setRowCount(0)
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
self.tableWidget_Info.setColumnCount(4)
header = self.tableWidget_Info.horizontalHeader()
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = 2*nr_ind+2 #add two rows for intermediate headers (Train/Valid)
self.tableWidget_Info.setRowCount(nr_rows)
#Which selected file has the most features?
header_labels = ["Class","Events tot.","Events/Epoch","Name"]
self.tableWidget_Info.setHorizontalHeaderLabels(header_labels)
#self.tableWidget_Info.resizeColumnsToContents()
header = self.tableWidget_Info.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
#Training info
rowPosition = 0
self.tableWidget_Info.setSpan(rowPosition, 0, 1, 2)
item = QtWidgets.QTableWidgetItem("Train. data")
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.tableWidget_Info.setItem(rowPosition, 0, item)
rowPosition += 1
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
classes = np.unique(indices_train)
if len(classes)==len(self.classes_custom):
classes_custom_bool = True
else:
classes_custom_bool = False
#display information for each individual class
for index_ in range(len(classes)):
#for index in np.unique(indices_train):
index = classes[index_]
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info.setItem(rowPosition, 0, item)
#Get the training files of that index
ind = np.where(indices_train==index)[0]
SelectedFiles_train_index = np.array(SelectedFiles_train)[ind]
#Total nr of cells for each class
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
self.tableWidget_Info.setItem(rowPosition, 1, item)
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
if classes_custom_bool==False:
item.setData(QtCore.Qt.EditRole,str(index))
else:
item.setData(QtCore.Qt.EditRole,self.classes_custom[index_])
self.tableWidget_Info.setItem(rowPosition, 3, item)
rowPosition += 1
#Validation info
self.tableWidget_Info.setSpan(rowPosition, 0, 1, 2)
item = QtWidgets.QTableWidgetItem("Val. data")
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.tableWidget_Info.setItem(rowPosition, 0, item)
rowPosition += 1
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
#Total nr of cells for each index
for index in np.unique(indices_valid):
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info.setItem(rowPosition, 0, item)
#Get the validation files of that index
ind = np.where(indices_valid==index)[0]
SelectedFiles_valid_index = np.array(SelectedFiles_valid)[ind]
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
self.tableWidget_Info.setItem(rowPosition, 1, item)
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info.setItem(rowPosition, 2, item)
rowPosition += 1
self.tableWidget_Info.resizeColumnsToContents()
self.tableWidget_Info.resizeRowsToContents()
def update_data_overview_2(self,SelectedFiles):
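"""
Rebuild tableWidget_Info_2 (used on the 'Assess Model' tab): one row per
class of the validation data, showing the number of cells per epoch, a color
swatch and an editable class name. ValidationSet and Metrics are reset.
"""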
if len(SelectedFiles)==0:
#Table2
self.tableWidget_Info_2.setColumnCount(0)
self.tableWidget_Info_2.setRowCount(0)
#In case user specified X_valid and y_valid before, delete it again:
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
#Initiate the table with 4 columns : this will be ["Index","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setColumnCount(4)
header_labels = ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info_2.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
return
#Prepare a table in tableWidget_Info
self.tableWidget_Info_2.setColumnCount(0)
self.tableWidget_Info_2.setRowCount(0)
#In case user specified X_valid and y_valid before, delete it again:
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
#Initiate the table with 4 columns : this will be ["Index","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setColumnCount(4)
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = nr_ind
self.tableWidget_Info_2.setRowCount(nr_rows)
#Which selected file has the most features?
header_labels = ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info_2.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
rowPosition = 0
#Validation info
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
#Total nr of cells for each index
for index in np.unique(indices_valid):
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 0, item)
#Get the validation files of that index
ind = np.where(indices_valid==index)[0]
SelectedFiles_valid_index = np.array(SelectedFiles_valid)[ind]
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info_2.setItem(rowPosition, 1, item)
#Column for color
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, "")
item.setBackground(QtGui.QColor(self.colorsQt[index]))
self.tableWidget_Info_2.setItem(rowPosition, 2, item)
#Column for User specified name
item = QtWidgets.QTableWidgetItem()
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 3, item)
rowPosition += 1
self.tableWidget_Info_2.resizeColumnsToContents()
self.tableWidget_Info_2.resizeRowsToContents()
def tableWidget_Info_2_click(self,item):
if item is not None:
if item.column()==2:
tableitem = self.tableWidget_Info_2.item(item.row(), item.column())
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
def tableWidget_HistoryItems_dclick(self,item):
if item is not None:
tableitem = self.tableWidget_HistoryItems.item(item.row(), item.column())
if str(tableitem.text())!="Show saved only":
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
self.update_historyplot()
def select_all(self,col):
"""
Check/Uncheck items on table_dragdrop
"""
apply_at_col = [2,3,8,10]
if col not in apply_at_col:
return
#otherwise continue
rows = range(self.table_dragdrop.rowCount()) #Number of rows of the table
tableitems = [self.table_dragdrop.item(row, col) for row in rows]
checkStates = [tableitem.checkState() for tableitem in tableitems]
#Checked?
checked = [state==QtCore.Qt.Checked for state in checkStates]
if set(checked)=={True}:#all are checked!
#Uncheck all!
for tableitem in tableitems:
tableitem.setCheckState(QtCore.Qt.Unchecked)
else:#otherwise check all
for tableitem in tableitems:
tableitem.setCheckState(QtCore.Qt.Checked)
#If shuffle column was clicked do some extra
if col==8:
for rowPosition in rows:
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==False:
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_images = rtdc_ds["events"]["image"].len()
columnPosition = 6
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, nr_images)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==True:
#Inspect this table item. If shuffle was checked before, the cell was grayed out; re-enable it now
item = self.table_dragdrop.item(rowPosition, 6)
item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
#Finally, update the Data-Overview-Box
self.dataOverviewOn()#update the overview box
def item_dclick(self, item):
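"""
Handle double clicks on table_dragdrop. Double clicking the Train/Valid
checkbox columns (2,3) toggles the checkbox; double clicking the filename
column (0) opens a popup showing a randomly chosen example image of that
dataset together with a cropped version (crop size from spinBox_imagecrop).
"""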
#Check/Uncheck if item is from column 2 or 3
tableitem = self.table_dragdrop.item(item.row(), item.column())
if item.column() in [2,3]:
#If the item is unchecked ->check it!
if tableitem.checkState() == QtCore.Qt.Unchecked:
tableitem.setCheckState(QtCore.Qt.Checked)
#else, the other way around
elif tableitem.checkState() == QtCore.Qt.Checked:
tableitem.setCheckState(QtCore.Qt.Unchecked)
#Show example image if item on column 0 was dclicked
if item.column() == 0:
#rtdc_path = str(item.text())
#rtdc_path = tableitem.text()
rtdc_path = self.table_dragdrop.cellWidget(item.row(), item.column()).text()
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
nr_images = rtdc_ds["events"]["image"].len()
ind = np.random.randint(0,nr_images)
img = rtdc_ds["events"]["image"][ind]
if len(img.shape)==2:
height, width = img.shape
channels = 1
elif len(img.shape)==3:
height, width, channels = img.shape
else:
print("Invalid image format: "+str(img.shape))
return
self.w = MyPopup()
self.gridLayout_w = QtWidgets.QGridLayout(self.w)
self.label_image = QtWidgets.QLabel(self.w)
self.label_cropimage = QtWidgets.QLabel(self.w)
#zoom image such that longest side is 512
zoom_factor = np.round(float(512.0/np.max(img.shape)),0)
#Get the order, specified in Image processing->Zoom Order
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
#Convert to corresponding cv2 zooming method
zoom_interpol_method = aid_img.zoom_arguments_scipy2cv(zoom_factor,zoom_order)
img_zoomed = cv2.resize(img, dsize=None,fx=zoom_factor, fy=zoom_factor, interpolation=eval(zoom_interpol_method))
if channels==1:
height, width = img_zoomed.shape
if channels==3:
height, width, _ = img_zoomed.shape
if channels==1:
qi=QtGui.QImage(img_zoomed.data, width, height,width, QtGui.QImage.Format_Indexed8)
if channels==3:
qi = QtGui.QImage(img_zoomed.data,img_zoomed.shape[1], img_zoomed.shape[0], QtGui.QImage.Format_RGB888)
self.label_image.setPixmap(QtGui.QPixmap.fromImage(qi))
self.gridLayout_w.addWidget(self.label_image, 1,1)
#get the location of the cell
rowPosition = item.row()
pix = float(self.table_dragdrop.item(rowPosition, 7).text())
#pix = rtdc_ds.config["imaging"]["pixel size"]
PIX = pix
pos_x,pos_y = rtdc_ds["events"]["pos_x"][ind]/PIX,rtdc_ds["events"]["pos_y"][ind]/PIX
cropsize = self.spinBox_imagecrop.value()
y1 = int(round(pos_y))-cropsize/2
x1 = int(round(pos_x))-cropsize/2
y2 = y1+cropsize
x2 = x1+cropsize
#Crop the image
img_crop = img[int(y1):int(y2),int(x1):int(x2)]
#zoom image such that the height gets the same as for non-cropped img
zoom_factor = float(img_zoomed.shape[0])/img_crop.shape[0]
if zoom_factor == np.inf:
if self.actionVerbose.isChecked()==True:
print("Set resize factor to 1. Before, it was: "+str(zoom_factor))
zoom_factor = 1
#Get the order, specified in Image processing->Zoom Order
zoom_order = str(self.comboBox_zoomOrder.currentText()) #
zoom_interpol_method = aid_img.zoom_arguments_scipy2cv(zoom_factor,zoom_order)
img_crop = cv2.resize(img_crop, dsize=None,fx=zoom_factor, fy=zoom_factor, interpolation=eval(zoom_interpol_method))
if channels==1:
height, width = img_crop.shape
qi=QtGui.QImage(img_crop.data, width, height,width, QtGui.QImage.Format_Indexed8)
if channels==3:
height, width, _ = img_crop.shape
qi = QtGui.QImage(img_crop.data,width, height, QtGui.QImage.Format_RGB888)
self.label_cropimage.setPixmap(QtGui.QPixmap.fromImage(qi))
self.gridLayout_w.addWidget(self.label_cropimage, 1,2)
self.w.show()
def get_norm_from_modelparafile(self):
#Get the normalization method from a modelparafile
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open meta-data', Default_dict["Path of last model"],"AIDeveloper Meta file (*meta.xlsx)")
filename = filename[0]
if len(str(filename))==0:
return
norm = pd.read_excel(filename,sheet_name='Parameters')["Normalization"]
norm = str(norm[0])
index = self.comboBox_Normalization.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_Normalization.setCurrentIndex(index)
self.w.close()
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Invalid normalization method was specified.\
Likely this version of AIDeveloper does not support that normalization method\
Please define a valid normalization method")
msg.setDetailedText("Supported normalization methods are: "+"\n".join(self.norm_methods))
msg.setWindowTitle("Invalid Normalization method")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("Invalid Normalization method")
def update_plottingTab(self):
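"""
Load the file currently selected in comboBox_chooseRtdcFile and populate the
Plotting tab: scalar (0D) features fill the x/y feature comboboxes, while
image-like (2D) and trace-like (1D) features are forwarded to the 2D/1D
option popups. The contour checkbox is enabled if a mask or contour exists.
"""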
#Get current text of combobox (url to data set)
url = str(self.comboBox_chooseRtdcFile.currentText())
if len(url)==0:
return
failed,rtdc_ds = aid_bin.load_rtdc(url)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
keys = list(rtdc_ds["events"].keys())
#find keys of image_channels
keys_0d,keys_1d,keys_2d = [],[],[]
for key in keys:
if type(rtdc_ds["events"][key])==h5py._hl.dataset.Dataset:
shape = rtdc_ds["events"][key].shape
if len(shape)==1: #zero-dimensional info (single number per cell)
keys_0d.append(key)
elif len(shape)==2: #one-dimensional info (multiple numbers per cell)
keys_1d.append(key)
elif len(shape)==3: #two-dimensional info (images)
keys_2d.append(key)
#add the traces to the 1d features
if "trace" in keys:
for key_trace in list(rtdc_ds["events"]["trace"].keys()):
keys_1d.append(key_trace+" (RTFDC)")
#Sort keys_2d: "image" first; "mask" last
keys_2d.insert(0, keys_2d.pop(keys_2d.index("image")))
keys_2d.insert(len(keys_2d), keys_2d.pop(keys_2d.index("mask")))
#Fill those features into the comboboxes at the scatterplot
self.comboBox_featurex.addItems(keys_0d)
self.comboBox_featurey.addItems(keys_0d)
#check if masks or contours are available
cont_available = "mask" in keys or "contour" in keys
self.checkBox_contour.setEnabled(cont_available)
self.checkBox_contour.setChecked(cont_available)
#Centroid is always available (prerequisite for AIDeveloper)
self.checkBox_centroid.setEnabled(True)
self.checkBox_centroid.setChecked(True)
#Initialize option menus
self.contour_options_nr = 0
self.centroid_options_nr = 0
self.show_1d_options_nr = 0
self.show_2d_options_nr = 0
self.init_contour_options(keys_2d)
self.init_centroid_options(keys_1d)
self.init_2d_options(keys_2d)
self.init_1d_options(keys_1d)
def init_contour_options(self,keys_2d):
print("Work in progress")
# self.popup_layercontrols = MyPopup()
# self.popup_layercontrols_ui = frontend.Ui_LayerControl()
# self.popup_layercontrols_ui.setupUi(self.popup_layercontrols,keys_2d) #open a popup
def init_centroid_options(self,keys_image):
print("Work in progress")
# self.popup_centroid_options = MyPopup()
# self.popup_centroid_options_ui = aid_frontend.Ui_centroid_options()
# self.popup_centroid_options_ui.setupUi(self.popup_centroid_options,keys_image) #open a popup
def init_2d_options(self,keys_2d):
#Initialize 2d Option Menu. Range values are saved and manipulated here
self.popup_2dOptions = MyPopup()
self.popup_2dOptions_ui = aid_frontend.Ui_2dOptions()
self.popup_2dOptions_ui.setupUi(self.popup_2dOptions,keys_2d) #open a popup
def init_1d_options(self,keys_1d):
self.popup_1dOptions = MyPopup()
self.popup_1dOptions_ui = aid_frontend.Ui_1dOptions()
self.popup_1dOptions_ui.setupUi(self.popup_1dOptions,keys_1d) #open a popup
def show_contour_options(self):
self.contour_options_nr += 1
print("Work in progress")
def show_centroid_options(self):
print("Work in progress")
self.centroid_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.centroid_options_nr==1:
for iterator in range(len(self.popup_layercontrols_ui.spinBox_minChX)):
print(1)
def show_2d_options(self):
self.show_2d_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.show_2d_options_nr==1:
for iterator in range(len(self.popup_2dOptions_ui.spinBox_minChX)):
slider = self.popup_2dOptions_ui.horizontalSlider_chX[iterator]
slider.startValueChanged.connect(lambda _, b=None: self.put_image(ind=b))
slider.endValueChanged.connect(lambda _, b=None: self.put_image(ind=b))
checkBox = self.popup_2dOptions_ui.checkBox_show_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_image(ind=b))
comboBox = self.popup_2dOptions_ui.comboBox_cmap_chX[iterator]
comboBox.currentIndexChanged.connect(lambda _, b=None: self.put_image(ind=b))
checkBox = self.popup_2dOptions_ui.checkBox_auto_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_image(ind=b))
self.popup_2dOptions.show()
def show_1d_options(self):
self.show_1d_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.show_1d_options_nr==1:
for iterator in range(len(self.popup_1dOptions_ui.checkBox_show_chX)):
checkBox = self.popup_1dOptions_ui.checkBox_show_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_line(index=b))
comboBox = self.popup_1dOptions_ui.comboBox_cmap_chX[iterator]
comboBox.clicked.connect(lambda _, b=None: self.put_line(index=b))
self.popup_1dOptions.show()
def activate_deactivate_spinbox(self,newstate):
#get the checkstate of the Input model crop
if newstate==2:
#activate the spinbox
self.spinBox_imagecrop.setEnabled(True)
elif newstate==0:
self.spinBox_imagecrop.setEnabled(False)
def gray_or_rgb_augmentation(self,index):
#When Color-Mode is changed:
#Get the new colormode:
new_colormode = self.colorModes[index]
#when the new Color Mode is Grayscale, disable saturation and hue augmentation
if new_colormode=="Grayscale":
self.checkBox_contrast.setEnabled(True)
self.checkBox_contrast.setChecked(True)
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
self.checkBox_saturation.setEnabled(False)
self.checkBox_saturation.setChecked(False)
self.doubleSpinBox_saturationLower.setEnabled(False)
self.doubleSpinBox_saturationHigher.setEnabled(False)
self.checkBox_hue.setEnabled(False)
self.checkBox_hue.setChecked(False)
self.doubleSpinBox_hueDelta.setEnabled(False)
elif new_colormode=="RGB":
self.checkBox_contrast.setEnabled(True)
self.checkBox_contrast.setChecked(True)
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
self.checkBox_saturation.setEnabled(True)
self.checkBox_saturation.setChecked(True)
self.doubleSpinBox_saturationLower.setEnabled(True)
self.doubleSpinBox_saturationHigher.setEnabled(True)
self.checkBox_hue.setEnabled(True)
self.checkBox_hue.setChecked(True)
self.doubleSpinBox_hueDelta.setEnabled(True)
else:
print("Invalid Color Mode")
def onClick(self,points,pointermethod):
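"""
Called when a scatter point was selected, either by clicking ('point') or
via the index slider/spinbox ('index'). Highlights the closest scatter
point, synchronizes spinBox_cellInd and horizontalSlider_cellInd without
triggering their onChange handlers, and updates the cell image and traces.
"""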
#delete the last item if the user selected already one:
try:
self.scatter_xy.removeItem(self.point_clicked)
except:
pass
if pointermethod=="point":
points = points[0]
p = points.pos()
clicked_x, clicked_y = p.x(), p.y()
a1 = (clicked_x)/float(np.max(self.feature_x))
a2 = (clicked_y)/float(np.max(self.feature_y))
#Which is the closest scatter point?
dist = np.sqrt(( a1-self.scatter_x_norm )**2 + ( a2-self.scatter_y_norm )**2)
index = np.argmin(dist)
elif pointermethod=="index":
index = points
clicked_x = self.feature_x[index]
clicked_y = self.feature_y[index]
self.point_clicked = pg.ScatterPlotItem()
self.point_clicked.setData([clicked_x], [clicked_y],brush="r",symbol='o',symbolPen="w",size=15)
self.scatter_xy.addItem(self.point_clicked)
#self.scatter_xy.plot([clicked_x], [clicked_y],pen=None,symbol='o',symbolPen='w',clear=False)
self.point_was_selected_before = True
#No matter whether the user clicked or used the slider -> always adjust spinbox and slider without running the onChange functions
self.changedbyuser = False
self.spinBox_cellInd.setValue(index)
self.horizontalSlider_cellInd.setValue(index)
self.changedbyuser = True
self.put_image(index)
self.put_line(index)
def put_image(self,ind):
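"""
Display the image of event 'ind' (or of spinBox_cellInd if ind is None) in
widget_showCell. In 'Grayscale' mode the raw channel stack is shown; in
'RGB' mode the channels are combined into an RGB image using the per-channel
range/colormap settings from the 2D options popup. Optionally overlays the
centroid and the contour derived from the mask.
"""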
#check that the user is looking at the plotting tab
curr_ind = self.tabWidget_Modelbuilder.currentIndex()
if curr_ind!=3:
return
try:
self.widget_showCell.removeItem(self.dot)
except:
pass
try:
self.widget_showCell.removeItem(self.plot_contour)
except:
pass
if ind is None:
index = int(self.spinBox_cellInd.value())
else:
index = ind
rtdc_ds = self.rtdc_ds
#which channels should be displayed
channels = len(self.popup_2dOptions_ui.spinBox_minChX)
keys_2d = [self.popup_2dOptions_ui.label_layername_chX[i].text() for i in range(channels)]
#Define variable on self that carries all image information
if channels==1:
img = np.expand_dims(rtdc_ds["events"]["image"][index],-1)
elif channels>1:
img = np.stack( [rtdc_ds["events"][key][index] for key in keys_2d] ,axis=-1)
if len(img.shape)==2:
channels = 1
elif len(img.shape)==3:
height, width, channels = img.shape
else:
print("Invalid image format: "+str(img.shape))
return
color_mode = str(self.comboBox_GrayOrRGB_2.currentText())
if color_mode=="Grayscale": #Slider allows to show individual layers: each is shown as grayscale
img = img
elif color_mode == "RGB":#User can define, which layers are shown in R,G,and B
#Retrieve the setting from self.popup_layercontrols_ui
ui_item = self.popup_2dOptions_ui
layer_names = [obj.text() for obj in ui_item.label_layername_chX]
layer_active = [obj.isChecked() for obj in ui_item.checkBox_show_chX]
layer_range = [obj.getRange() for obj in ui_item.horizontalSlider_chX]
layer_auto = [obj.isChecked() for obj in ui_item.checkBox_auto_chX]
layer_cmap = [obj.currentText() for obj in ui_item.comboBox_cmap_chX]
#Assemble the image according to the settings in self.popup_layercontrols_ui
#Find activated layers for each color:
ind_active_r,ind_active_g,ind_active_b = [],[],[]
for ch in range(len(layer_cmap)):
#for color,active in zip(layer_cmap,layer_active):
if layer_cmap[ch]=="Red" and layer_active[ch]==True:
ind_active_r.append(ch)
if layer_cmap[ch]=="Green" and layer_active[ch]==True:
ind_active_g.append(ch)
if layer_cmap[ch]=="Blue" and layer_active[ch]==True:
ind_active_b.append(ch)
if len(ind_active_r)>0:
img_ch = img[:,:,np.array(ind_active_r)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_r)] #Range of all red channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_r)] #Automatic range
#Scale each red channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_r = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_r = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
if len(ind_active_g)>0:
img_ch = img[:,:,np.array(ind_active_g)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_g)] #Range of all green channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_g)] #Automatic range
#Scale each green channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_g = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_g = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
if len(ind_active_b)>0:
img_ch = img[:,:,np.array(ind_active_b)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_b)] #Range of all blue channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_b)] #Automatic range
#Scale each blue channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_b = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_b = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
#Assemble image by stacking all layers
img = np.stack([img_r,img_g,img_b],axis=-1)
#Get the levels of the previous frame
levels_init = self.widget_showCell.getLevels()
if levels_init==(0,1.0):
levels_init = (0,255)
#Get the layer index of the previous frame
index_ = self.widget_showCell.currentIndex
if color_mode=="Grayscale":
self.widget_showCell.setImage(img.T,autoRange=False,levels=levels_init,levelMode="mono")
self.widget_showCell.setCurrentIndex(index_)
elif color_mode=="RGB":
self.widget_showCell.setImage(np.swapaxes(img,0,1))
pix = rtdc_ds.attrs["imaging:pixel size"]
pos_x = rtdc_ds["events"]["pos_x"][index]/pix
pos_y = rtdc_ds["events"]["pos_y"][index]/pix
#Indicate the centroid of the cell
if self.checkBox_centroid.isChecked():
self.dot = pg.CircleROI(pos=(pos_x-2, pos_y-2), size=4, pen=QtGui.QPen(QtCore.Qt.red, 0.1), movable=False)
self.widget_showCell.getView().addItem(self.dot)
self.widget_showCell.show()
if self.checkBox_contour.isChecked():
#get the contour based on the mask
contour,_ = cv2.findContours(rtdc_ds["events"]["mask"][index], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contour = contour[0][:,0,:]
self.plot_contour = pg.PlotCurveItem(contour[:,0],contour[:,1],width=6,pen="r")
self.widget_showCell.getView().addItem(self.plot_contour)
def put_line(self,index):
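"""
Plot the 1D features (e.g. fluorescence traces) of the selected event in
plot_fl_trace, using the channels and colors chosen in the 1D options popup.
"""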
curr_ind = self.tabWidget_Modelbuilder.currentIndex()
if curr_ind!=3:
return
#Fluorescence traces: clear first
try:
self.plot_fl_trace_.clear() #clear the plot
self.plot_fl_trace.clear() #clear the plot
except:
pass
if index is None:
index = int(self.spinBox_cellInd.value())
rtdc_ds = self.rtdc_ds
feature_keys = list(rtdc_ds.keys())
#which features should be displayed
features_nr = len(self.popup_1dOptions_ui.checkBox_show_chX)
keys_1d = [self.popup_1dOptions_ui.checkBox_show_chX[i].text() for i in range(features_nr)]
keys_1d_on = [self.popup_1dOptions_ui.checkBox_show_chX[i].isChecked() for i in range(features_nr)]
colors = [self.popup_1dOptions_ui.comboBox_cmap_chX[i].palette().button().color() for i in range(features_nr)]
colors = [list(c.getRgb()) for c in colors]
colors = [tuple(c) for c in colors]
ind = np.where(np.array(keys_1d_on)==True)[0]
keys_1d = list(np.array(keys_1d)[ind])
colors = list(np.array(colors)[ind])
for key_1d,color in zip(keys_1d,colors):
if key_1d.endswith(" (RTFDC)"):
key_1d = key_1d.split(" (RTFDC)")[0]
trace_flx = rtdc_ds["events"]["trace"][key_1d][index]
pencolor = pg.mkPen(color, width=2)
self.plot_fl_trace_ = self.plot_fl_trace.plot(range(len(trace_flx)),trace_flx,width=6,pen=pencolor,clear=False)
# if "fl1_max" in feature_keys and "fl1_pos" in feature_keys: #if also the maxima and position of the max are available: use it to put the region accordingly
# fl1_max,fl1_pos = rtdc_ds["events"]["fl1_max"][index],rtdc_ds["events"]["fl1_pos"][index]
else:
values = rtdc_ds["events"][key_1d][index]
pencolor = pg.mkPen(color, width=2)
self.plot_fl_trace_ = self.plot_fl_trace.plot(range(len(values)),values,width=6,pen=pencolor,clear=False)
#get the maximum of [fl1_max,fl2_max,fl3_max] and put the region to the corresponding fl-position
# ind = np.argmax(np.array([fl1_max,fl2_max,fl3_max]))
# region_pos = np.array([fl1_pos,fl2_pos,fl3_pos])[ind] #this region is already given in us. translate this back to range
# peak_height = np.array([fl1_max,fl2_max,fl3_max])[ind]
# sample_rate = rtdc_ds.attrs["fluorescence:sample rate"]
# fl_pos_ind = float((sample_rate*region_pos))/1E6 #
# #Indicate the used flx_max and flx_pos by a scatter dot
# self.peak_dot = self.plot_fl_trace.plot([float(fl_pos_ind)], [float(peak_height)],pen=None,symbol='o',symbolPen='w',clear=False)
def onScatterClick(self,event, points):
pointermethod = 'point'
if self.changedbyuser:
self.onClick(points,pointermethod)
def onIndexChange(self,index):
pointermethod = 'index'
if self.changedbyuser:
self.onClick(index,pointermethod)
#Set self.changedbyuser to False and change the spinbox and slider. changedbyuser=False prevents onClick function
self.changedbyuser = False
self.spinBox_cellInd.setValue(index)
self.horizontalSlider_cellInd.setValue(index)
self.changedbyuser = True
def updateScatterPlot(self):
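"""
Read the selected file and features, optionally compute a density estimate
(2D histogram or Gaussian KDE) for coloring, and redraw the scatterplot and
the marginal histograms. The cell-index slider and spinbox are rescaled to
the number of events in the file.
"""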
#If the Plot is updated, delete the dot in the cell-image
try:
self.widget_showCell.removeItem(self.dot)
except:
pass
try:
self.scatter_xy.removeItem(self.point_clicked)
except:
pass
self.point_was_selected_before = False
#read url from current comboBox_chooseRtdcFile
url = str(self.comboBox_chooseRtdcFile.currentText())
if len(url)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please use the 'Build' tab to load files first")
msg.setWindowTitle("No file selected")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
failed,rtdc_ds = aid_bin.load_rtdc(url)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.rtdc_ds = rtdc_ds
feature_x_name = str(self.comboBox_featurex.currentText())
feature_y_name = str(self.comboBox_featurey.currentText())
features = list(self.rtdc_ds["events"].keys())
if feature_x_name in features:
self.feature_x = self.rtdc_ds["events"][feature_x_name]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Feature on x axis is not contained in data set")
msg.setWindowTitle("Invalid x feature")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if feature_y_name in features:
self.feature_y = self.rtdc_ds["events"][feature_y_name]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Feature on y axis is not contained in data set")
msg.setWindowTitle("Invalid y feature")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.changedbyuser = True #variable used to prevent plotting if spinbox or slider is changed programmatically
#density estimation
kde = self.comboBox_kde.currentText()
if kde=="None":
brush = "b"
elif kde=="2d Histogram" or kde=="Gauss":
if kde=="2d Histogram":
density = aid_bin.kde_histogram(np.array(self.feature_x), np.array(self.feature_y))
elif kde=="Gauss":
density = aid_bin.kde_gauss(np.array(self.feature_x), np.array(self.feature_y))
density_min,density_max = np.min(density),np.max(density)
density = (density-density_min)/density_max
# define colormap
brush = []
from pyqtgraph.graphicsItems.GradientEditorItem import Gradients
cmap = pg.ColorMap(*zip(*Gradients["viridis"]["ticks"]))
for k in density:
brush.append(cmap.mapToQColor(k))
#Add plot
#self.scatter = self.scatter_xy.plot(np.array(self.feature_x), np.array(self.feature_y),symbolPen=None,pen=None,symbol='o',brush=brush[100],clear=True)
#try to remove existing scatterplot
try:
self.scatter_xy.removeItem(self.scatter)
except:
print("Not cleared")
self.scatter = pg.ScatterPlotItem()
self.scatter.setData(np.array(self.feature_x), np.array(self.feature_y),brush=brush,symbolPen=None,pen=None,symbol='o',size=10)
self.scatter_xy.addItem(self.scatter)
#pen=None,symbol='o',symbolPen=None,symbolBrush=density,clear=True)
self.scatter.sigClicked.connect(self.onScatterClick) #When scatterplot is clicked, show the desired cell
#Fill histogram for x-axis; widget_histx
y,x = np.histogram(self.feature_x, bins='auto')
self.hist_x.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
#Manually clear y hist first. Only clear=True did not do the job
self.hist_y.clear()
#Fill histogram for y-axis; widget_histy
y,x = np.histogram(self.feature_y, bins='auto')
curve = pg.PlotCurveItem(-1.*x, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150),clear=True)
curve.rotate(-90)
self.hist_y.addItem(curve)
self.scatter_x_norm = (np.array(self.feature_x).astype(np.float32))/float(np.max(self.feature_x))
self.scatter_y_norm = (np.array(self.feature_y).astype(np.float32))/float(np.max(self.feature_y))
#Adjust the horizontalSlider_cellInd and spinBox_cellInd
self.horizontalSlider_cellInd.setSingleStep(1)
self.horizontalSlider_cellInd.setMinimum(0)
self.horizontalSlider_cellInd.setMaximum(len(self.feature_x)-1)
self.spinBox_cellInd.setMinimum(0)
self.spinBox_cellInd.setMaximum(len(self.feature_x)-1)
def selectPeakPos(self):
#Check if self.region exists
#If not, show a message and return:
if not hasattr(self, 'region'):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no region defined yet")
msg.setWindowTitle("No region defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Try to get the user defined peak position
if not hasattr(self, 'new_peak'):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no peak defined yet")
msg.setWindowTitle("No peak defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#how many rows are already in the table?
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
self.tableWidget_showSelectedPeaks.setRowCount(rowcount+1)
rowPosition = rowcount
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["fl_max"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 0, item)
item = QtWidgets.QTableWidgetItem()
fl_pos_us = float(float(self.new_peak["fl_pos"])*float(1E6))/float(self.rtdc_ds.attrs["fluorescence:sample rate"])
item.setData(QtCore.Qt.EditRole,fl_pos_us)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 1, item)
item = QtWidgets.QTableWidgetItem()
pos_x_um = float(self.new_peak["pos_x"])*float(self.rtdc_ds.attrs["imaging:pixel size"])
item.setData(QtCore.Qt.EditRole,pos_x_um)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["fl_pos"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 3, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["pos_x"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 4, item)
self.tableWidget_showSelectedPeaks.resizeColumnsToContents()
self.tableWidget_showSelectedPeaks.resizeRowsToContents()
#Update the widget_showSelectedPeaks
self.update_peak_plot()
def selectPeakRange(self):
new_region = self.region.getRegion()
region_width = np.max(new_region) - np.min(new_region) #in [samples]
sample_rate = self.rtdc_ds.attrs["fluorescence:sample rate"]
region_width = (float(region_width)/float(sample_rate))*1E6 #range[samples]*(1/sample_rate[1/s]) = range[s]; multiply by 1E6 to convert to us
self.region_width = region_width
#put this in the table
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.region_width))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
def onPeaksPlotClick(self,event, points):
points = points[0]
p = points.pos()
clicked_x, clicked_y = p.x(), p.y()
a1 = (clicked_x)/float(np.max(self.Pos_x))
a2 = (clicked_y)/float(np.max(self.Fl_pos))
#Which is the closest scatter point?
pos_x_norm = self.Pos_x/np.max(self.Pos_x)#normalized pos_x
fl_pos_norm = self.Fl_pos/np.max(self.Fl_pos)#normalized fl_pos
dist = np.sqrt(( a1-pos_x_norm )**2 + ( a2-fl_pos_norm )**2)
index = np.argmin(dist)
#Highlight this row
self.tableWidget_showSelectedPeaks.selectRow(index)
#Delete the highlighted rows
# try:
# self.actionRemoveSelectedPeaks_function()
# except:
# pass
def update_peak_plot(self):
#This function reads tableWidget_showSelectedPeaks and
#fits a function and
#puts fitting parameters on tableWidget_peakModelParameters
#read the data on tableWidget_showSelectedPeaks
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
Fl_pos,Pos_x = [],[]
for row in range(rowcount):
line = [float(self.tableWidget_showSelectedPeaks.item(row, col).text()) for col in [1,2]] #use the values for [us] and [um]
Fl_pos.append(line[0])
Pos_x.append(line[1])
self.Fl_pos = np.array(Fl_pos)
self.Pos_x = np.array(Pos_x)
self.selectedPeaksPlotPlot = self.selectedPeaksPlot.plot(self.Pos_x, self.Fl_pos,pen=None,symbol='o',symbolPen=None,symbolBrush='b',clear=True)
#if user clicks in the plot, show him the corresponding row in the table
self.selectedPeaksPlotPlot.sigPointsClicked.connect(self.onPeaksPlotClick)
if not hasattr(self, 'region_width'): #if there was no region_width defined yet...
#to get a reasonable initial range, use 20% of the nr. of available samples
samples_per_event = self.rtdc_ds.attrs["fluorescence:samples per event"]
self.region_width = 0.2*samples_per_event #width of the region in samples
#Convert to SI unit:
sample_rate = self.rtdc_ds.attrs["fluorescence:sample rate"]
self.region_width = (float(self.region_width)/float(sample_rate))*1E6 #range[samples]*(1/sample_rate[1/s]) = range[s]; multiply by 1E6 to convert to us
#which model should be used?
if str(self.comboBox_peakDetModel.currentText()) == "Linear dependency and max in range" and len(Pos_x)>1:
slope,intercept = np.polyfit(Pos_x, Fl_pos,deg=1) #Linear FIT, y=mx+n; y=FL_pos[us] x=Pos_x[um]
xlin = np.round(np.linspace(np.min(Pos_x),np.max(Pos_x),25),1)
ylin = intercept + slope*xlin
self.selectedPeaksPlot.plot(xlin, ylin,width=6,pen='b',clear=False)
#Put info to tableWidget_peakModelParameters
self.tableWidget_peakModelParameters.setColumnCount(2)
self.tableWidget_peakModelParameters.setRowCount(5)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Model")
self.tableWidget_peakModelParameters.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Linear dependency and max in range")
self.tableWidget_peakModelParameters.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.region_width))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Intercept [us]")
self.tableWidget_peakModelParameters.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(intercept))
self.tableWidget_peakModelParameters.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Slope [us/um]")
self.tableWidget_peakModelParameters.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(slope))
self.tableWidget_peakModelParameters.setItem(3, 1, item)
#Calculate velocity
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Velocity[m/s]")
self.tableWidget_peakModelParameters.setItem(4, 0, item)
velocity = float(1.0/float(slope))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(velocity))
self.tableWidget_peakModelParameters.setItem(4, 1, item)
def addHighestXPctPeaks(self):
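"""
For each activated fluorescence channel (fl1/fl2/fl3), find the maxima of
all traces, keep the highest x percent (doubleSpinBox_highestXPercent) and
append them to tableWidget_showSelectedPeaks before refitting the peak model.
"""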
#how many x%?
x_pct = float(self.doubleSpinBox_highestXPercent.value())
#Get the fluorescence traces and maxima/positions of the maxima
#->it could be that the user did not yet load the dataset:
if not hasattr(self,"rtdc_ds"):
#run the function updateScatterPlot()
self.updateScatterPlot()
trace = self.rtdc_ds["events"]["trace"]
fl_keys = list(trace.keys())
fl1_max,fl1_pos,fl2_max,fl2_pos,fl3_max,fl3_pos,pos_x = [],[],[],[],[],[],[]
for i in range(len(fl_keys)):
if "fl1_median" in fl_keys[i] and self.checkBox_fl1.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl1_max.append(trace_flx[ind])
fl1_pos.append(ind)
#Get the x% maxima
fl1_max = np.array(fl1_max)
fl1_pos = np.array(fl1_pos)
sorter = np.argsort(fl1_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl1_max))]
fl1_max = fl1_max[sorter]
fl1_pos = fl1_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
elif "fl2_median" in fl_keys[i] and self.checkBox_fl2.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl2_max.append(trace_flx[ind])
fl2_pos.append(ind)
#Get the x% maxima
fl2_max = np.array(fl2_max)
fl2_pos = np.array(fl2_pos)
sorter = np.argsort(fl2_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl2_max))]
fl2_max = fl2_max[sorter]
fl2_pos = fl2_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
elif "fl3_median" in fl_keys[i] and self.checkBox_fl3.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl3_max.append(trace_flx[ind])
fl3_pos.append(ind)
#Get the x% maxima
fl3_max = np.array(fl3_max)
fl3_pos = np.array(fl3_pos)
sorter = np.argsort(fl3_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl3_max))]
fl3_max = fl3_max[sorter]
fl3_pos = fl3_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
#Add fl1 fl2 and fl3 information
flx_max = np.array(list(fl1_max)+list(fl2_max)+list(fl3_max))
flx_pos = np.array(list(fl1_pos)+list(fl2_pos)+list(fl3_pos))
pos_x_um = np.concatenate(np.atleast_2d(np.array(pos_x)))
pix = self.rtdc_ds.attrs["imaging:pixel size"]
pos_x = pos_x_um/pix #convert from um to pix
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
self.tableWidget_showSelectedPeaks.setRowCount(rowcount+len(flx_max))
for i in range(len(flx_max)):
rowPosition = rowcount+i
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(flx_max[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 0, item)
item = QtWidgets.QTableWidgetItem()
fl_pos_us = float(float(flx_pos[i])*float(1E6))/float(self.rtdc_ds.attrs["fluorescence:sample rate"] )
item.setData(QtCore.Qt.EditRole,fl_pos_us)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 1, item)
item = QtWidgets.QTableWidgetItem()
#pos_x_um = float(pos_x[i])*float(self.rtdc_ds.config["imaging"]["pixel size"])
item.setData(QtCore.Qt.EditRole,float(pos_x_um[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(flx_pos[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 3, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(pos_x[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 4, item)
#Update the widget_showSelectedPeaks
self.update_peak_plot()
def savePeakDetModel(self):
#Get tableWidget_peakModelParameters and write it to excel file
#Get filename from user:
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save peak fitting model', Default_dict["Path of last model"],"Excel file (*.xlsx)")
filename = filename[0]
if len(filename)==0:
return
#add the suffix .xlsx if missing
if not filename.endswith(".xlsx"):
filename = filename +".xlsx"
table = self.tableWidget_peakModelParameters
cols = table.columnCount()
header = range(cols)
rows = table.rowCount()
model_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
model_df.iloc[i, j] = table.item(i, j).text()
except:
model_df.iloc[i, j] = np.nan
table = self.tableWidget_showSelectedPeaks
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
rows = table.rowCount()
peaks_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
peaks_df.iloc[i, j] = table.item(i, j).text()
except:
peaks_df.iloc[i, j] = np.nan
writer = pd.ExcelWriter(filename, engine='openpyxl')
#Write the model parameters and the selected peaks to separate sheets
pd.DataFrame().to_excel(writer,sheet_name='Model') #initialize empty sheet
model_df.to_excel(writer,sheet_name='Model') #write the model parameters
pd.DataFrame().to_excel(writer,sheet_name='Peaks') #initialize empty Sheet
peaks_df.to_excel(writer,sheet_name='Peaks')
writer.save()
writer.close()
def loadPeakDetModel(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open peak fitting model', Default_dict["Path of last model"],"Excel file (*.xlsx)")
filename = filename[0]
if len(str(filename))==0:
return
peak_model_df = pd.read_excel(filename,sheet_name='Model')
model = peak_model_df.iloc[0,1]
if model=="Linear dependency and max in range":
#set the combobox accordingly
index = self.comboBox_peakDetModel.findText(model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_peakDetModel.setCurrentIndex(index)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Could not find a valid model in the chosen file. Did you accidentially load a session or history file?!")
msg.setWindowTitle("No valid model found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
range_ = float(peak_model_df.iloc[1,1])
intercept = float(peak_model_df.iloc[2,1])
slope = float(peak_model_df.iloc[3,1])
velocity = float(peak_model_df.iloc[4,1])
#put the information in the table
xlin = np.round(np.linspace(0,100,25),1)
ylin = intercept + slope*xlin
self.selectedPeaksPlot.plot(xlin, ylin,width=6,pen='b',clear=False)
#Put info to tableWidget_peakModelParameters
self.tableWidget_peakModelParameters.setColumnCount(2)
self.tableWidget_peakModelParameters.setRowCount(5)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Model")
self.tableWidget_peakModelParameters.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Linear dependency and max in range")
self.tableWidget_peakModelParameters.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(range_))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Intercept [us]")
self.tableWidget_peakModelParameters.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(intercept))
self.tableWidget_peakModelParameters.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Slope [us/um]")
self.tableWidget_peakModelParameters.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(slope))
self.tableWidget_peakModelParameters.setItem(3, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Velocity[m/s]")
self.tableWidget_peakModelParameters.setItem(4, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(velocity))
self.tableWidget_peakModelParameters.setItem(4, 1, item)
def applyPeakModel_and_export(self):
#On which files should the action be performed?
Files = []
if self.radioButton_exportAll.isChecked():
#Grab all items of comboBox_chooseRtdcFile
Files = [self.comboBox_chooseRtdcFile.itemText(i) for i in range(self.comboBox_chooseRtdcFile.count())]
else:
file = self.comboBox_chooseRtdcFile.currentText()
Files.append(str(file))
#Get the model from tableWidget_peakModelParameters
table = self.tableWidget_peakModelParameters
cols = table.columnCount()
header = range(cols)
rows = table.rowCount()
model_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
model_df.iloc[i, j] = table.item(i, j).text()
except:
model_df.iloc[i, j] = np.nan
model = model_df.iloc[0,1]
if model == "Linear dependency and max in range":
range_us = float(model_df.iloc[1,1]) #[us]
intercept_us = float(model_df.iloc[2,1])
slope_us_um = float(model_df.iloc[3,1])
#velocity_m_s = float(model_df.iloc[4,1])
#Get a directory from the user!
folder = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select directory', Default_dict["Path of last model"])
if len(folder)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Invalid directory")
msg.setWindowTitle("Invalid directory")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
for rtdc_path in Files:
path, rtdc_file = os.path.split(rtdc_path)
savename = os.path.join(folder,rtdc_file)
#Avoid to save to an existing file:
addon = 1
while os.path.isfile(savename):
savename = savename.split(".rtdc")[0]
if addon>1:
savename = savename.split("_"+str(addon-1))[0]
savename = savename+"_"+str(addon)+".rtdc"
addon += 1
print("Saving to : "+savename)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Convert quantities to [index]
sample_rate = rtdc_ds.attrs["fluorescence:sample rate"]
range_ = (range_us*float(sample_rate))/1E6 #range was given in us->Divide by 1E6 to get to s and then multiply by the sample rate
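#hedged example (hypothetical numbers): range_us = 200 us at a sample rate of 312500 Hz gives range_ = 200*312500/1E6 = 62.5 samples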
# #check if an rtdc_ds is already chosen:
# if not hasattr(self,'rtdc_ds'):
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("No measurement chosen yet. Use 'Update' button")
# msg.setWindowTitle("No measurement")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
# return
trace = rtdc_ds["events"]["trace"]
fl_keys = list(trace.keys()) #Which traces are available
fl1_max,fl1_pos,fl2_max,fl2_pos,fl3_max,fl3_pos,pos_x = [],[],[],[],[],[],[]
#Iterate over the available cells
pos_x = rtdc_ds["events"]["pos_x"] #is already given in [um]
indices = range(len(pos_x))
if model == "Linear dependency and max in range":
#Use the linear model to get the estimated location of the fluorescence peaks
fl_peak_position_us = intercept_us+slope_us_um*pos_x
#Convert to index
fl_peak_position_ = (fl_peak_position_us*float(sample_rate))/1E6
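#fl_peak_position_ is now an index into the fluorescence trace for each cell (same sample-rate scaling as range_ above)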
#Now we have the estimated peak position of each cell. Look at the traces on these spots
def ind_to_us(x):
return x*1E6/sample_rate
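#inverse of the scaling above: converts a trace index back to microseconds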
#iterate over the cells:
for cellindex in range(len(pos_x)):
#Iterate over the available traces
for i in range(len(fl_keys)):
if "_median" in fl_keys[i]:
trace_flx = trace[fl_keys[i]][cellindex]
trace_pos = np.array(range(len(trace_flx)))
left = int(fl_peak_position_[cellindex]-range_/2.0)
right = int(fl_peak_position_[cellindex]+range_/2.0)
trace_flx_range = trace_flx[left:right]
trace_pos_range = trace_pos[left:right]
ind = np.argmax(trace_flx_range)
if "fl1_median" in fl_keys[i]:
fl1_max.append(trace_flx_range[ind])
fl1_pos.append(ind_to_us(trace_pos_range[ind]))
if "fl2_median" in fl_keys[i]:
fl2_max.append(trace_flx_range[ind])
fl2_pos.append(ind_to_us(trace_pos_range[ind]))
if "fl3_median" in fl_keys[i]:
fl3_max.append(trace_flx_range[ind])
fl3_pos.append(ind_to_us(trace_pos_range[ind]))
#Save those new fluorescence features into free spots in .rtdc file
#Those names can be found via dclab.dfn.feature_names called (userdef0...userdef9)
#TODO (don't use dclab anymore for saving)
#But just in case anyone uses that function?!
#get metadata of the dataset
meta = {}
# only export configuration meta data (no user-defined config)
for sec in dclab.definitions.CFG_METADATA:
if sec in ["fmt_tdms"]:
# ignored sections
continue
if sec in rtdc_ds.config:
meta[sec] = rtdc_ds.config[sec].copy()
#features = rtdc_ds._events.keys() #Get the names of the online features
compression = 'gzip'
nev = len(rtdc_ds)
#["Overwrite Fl_max and Fl_pos","Save to userdef"]
features = list(rtdc_ds["events"].keys())
if str(self.comboBox_toFlOrUserdef.currentText())=='Save to userdef':
features = features+["userdef"+str(i) for i in range(10)]
with dclab.rtdc_dataset.write_hdf5.write(path_or_h5file=savename,meta=meta, mode="append") as h5obj:
# write each feature individually
for feat in features:
# event-wise, because
# - tdms-based datasets don't allow indexing with numpy
# - there might be memory issues
if feat == "contour":
cont_list = [rtdc_ds["events"]["contour"][ii] for ii in indices]
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"contour": cont_list},
mode="append",
compression=compression)
elif feat == "userdef0":
if "fl1_median" in fl_keys:
print("writing fl1_max to userdef0")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef0": np.array(fl1_max)},
mode="append",
compression=compression)
elif feat == "userdef1":
if "fl2_median" in fl_keys:
print("writing fl2_max to userdef1")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef1": np.array(fl2_max)},
mode="append",
compression=compression)
elif feat == "userdef2":
if "fl3_median" in fl_keys:
print("writing fl3_max to userdef2")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef2": np.array(fl3_max)},
mode="append",
compression=compression)
elif feat == "userdef3":
if "fl1_pos" in features:
print("writing fl1_pos to userdef3")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef3": np.array(fl1_pos)},
mode="append",
compression=compression)
elif feat == "userdef4":
if "fl2_pos" in features:
print("writing fl2_pos to userdef4")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef4": np.array(fl2_pos)},
mode="append",
compression=compression)
elif feat == "userdef5":
if "fl3_pos" in features:
print("writing fl3_pos to userdef5")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef5": np.array(fl3_pos)},
mode="append",
compression=compression)
elif feat in ["userdef"+str(i) for i in range(5,10)]:
pass
elif feat == "fl1_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl1_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl1_max)},mode="append",compression=compression)
elif feat == "fl2_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl2_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl2_max)},mode="append",compression=compression)
elif feat == "fl3_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl3_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl3_max)},mode="append",compression=compression)
elif feat == "fl1_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl1_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl1_pos)},mode="append",compression=compression)
elif feat == "fl2_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl2_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl2_pos)},mode="append",compression=compression)
elif feat == "fl3_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl3_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl3_pos)},mode="append",compression=compression)
elif feat == "index":
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"index": np.array(indices)+1}, #ShapeOut likes to start with index=1
mode="append",
compression=compression)
elif feat in ["mask", "image"]:
# store image stacks (reduced file size and save time)
m = 64
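#images/masks are written in stacks of m=64 frames, which keeps file size down and reduces the number of HDF5 write calls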
if feat=='mask':
im0 = rtdc_ds["events"][feat][0]
if feat=="image":
im0 = rtdc_ds["events"][feat][0]
imstack = np.zeros((m, im0.shape[0], im0.shape[1]),
dtype=im0.dtype)
jj = 0
if feat=='mask':
image_list = [rtdc_ds["events"][feat][ii] for ii in indices]
elif feat=='image':
image_list = [rtdc_ds["events"][feat][ii] for ii in indices]
for ii in range(len(image_list)):
dat = image_list[ii]
#dat = rtdc_ds[feat][ii]
imstack[jj] = dat
if (jj + 1) % m == 0:
jj = 0
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: imstack},
mode="append",
compression=compression)
else:
jj += 1
# write rest
if jj:
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: imstack[:jj, :, :]},
mode="append",
compression=compression)
elif feat == "trace":
for tr in rtdc_ds["events"]["trace"].keys():
tr0 = rtdc_ds["events"]["trace"][tr][0]
trdat = np.zeros((nev, tr0.size), dtype=tr0.dtype)
jj = 0
trace_list = [rtdc_ds["events"]["trace"][tr][ii] for ii in indices]
for ii in range(len(trace_list)):
trdat[jj] = trace_list[ii]
jj += 1
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"trace": {tr: trdat}},
mode="append",
compression=compression)
else:
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: rtdc_ds["events"][feat][indices]},mode="append")
h5obj.close()
def partialtrainability_activated(self,on_or_off):
if on_or_off==False:#0 means switched OFF
self.lineEdit_partialTrainability.setText("")
#self.lineEdit_partialTrainability.setEnabled(False)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(False)
#Also, remove the model from self!
self.model_keras = None
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(False)
self.lineEdit_LoadModelPath.setText("")#put the filename in the lineedit
#this happens when the user activated the expert option "partial trainability"
elif on_or_off==True:#2 means switched ON
#Has the user already chosen a model?
if self.model_keras == None: #if there is no model yet chosen
self.action_initialize_model(duties="initialize")
#If there is still no model...
if self.model_keras == None:# or self.model_keras_path==None: #if there is no model yet chosen
#Tell the user to initiate a model first!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>To use this option please first select and load a model. To do that choose/load a model in 'Define Model'-Tab and hit the button 'Initialize/Fit Model'. Choose to only initialize the model.</p></body></html>")
msg.setWindowTitle("Please load a model first")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#Switch off
self.lineEdit_partialTrainability.setText("")
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(False)
self.lineEdit_LoadModelPath.setText("")
#self.lineEdit_partialTrainability.setEnabled(False)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(False)
self.checkBox_partialTrainability.setChecked(False)
return
#Otherwise, there is a model on self and we can continue :)
#Collections are not supported
if type(self.model_keras)==tuple:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>Partial trainability is not available for collections of models. Please specify a single model.</p></body></html>")
msg.setWindowTitle("Collections of models not supported for collections of models")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Switch on lineedit and the button
#self.lineEdit_partialTrainability.setEnabled(True)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(True)#enables the button that opens the partial trainability popup
#Load trainability states of the model
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
Layer_train_status = [self.model_keras.layers[layerindex].trainable for layerindex in index]
self.lineEdit_partialTrainability.setText(str(Layer_train_status))#show the trainability status of each layer
def partialTrainability(self):
self.popup_trainability = MyPopup()
self.popup_trainability_ui = aid_frontend.popup_trainability()
self.popup_trainability_ui.setupUi(self.popup_trainability) #open a popup to show the layers in a table
#One can only activate this function when there was a model loaded already!
#self.model_keras has to exist!!!
if self.model_keras == None: #if there is no model yet chosen
self.action_initialize_model(duties="initialize")
if self.model_keras == None: #if there is still no model...
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>To use this option please first select and load a model. To do that choose/load a model in 'Define Model'-Tab and hit the button 'Initialize/Fit Model'. Choose to only initialize the model.</p></body></html>")
msg.setWindowTitle("Please load a model first")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#Switch this On in the final version
self.lineEdit_partialTrainability.setText("")
self.lineEdit_partialTrainability.setEnabled(False)#disable the lineEdit which shows the trainability status of each layer
self.pushButton_partialTrainability.setEnabled(False)
return
#Fill information about the model
if self.radioButton_NewModel.isChecked():#a new model is loaded
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("New model")
elif self.radioButton_LoadRestartModel.isChecked():#a new model is loaded
load_model_path = str(self.lineEdit_LoadModelPath.text())
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("Restart model: "+load_model_path)
elif self.radioButton_LoadContinueModel.isChecked():#a new model is loaded
load_model_path = str(self.lineEdit_LoadModelPath.text())
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("Continue model: "+load_model_path)
in_dim = self.model_keras.input_shape
#Retrieve the color_mode from the model (nr. of channels in last in_dim)
channels = in_dim[-1] #TensorFlow: channels in last dimension
out_dim = self.model_keras.output_shape[-1]
self.popup_trainability_ui.spinBox_pop_pTr_inpSize.setValue(int(in_dim[1]))
self.popup_trainability_ui.spinBox_pop_pTr_outpSize.setValue(int(out_dim))
if channels==1:
self.popup_trainability_ui.comboBox_pop_pTr_colorMode.addItem("Grayscale")
elif channels==3:
self.popup_trainability_ui.comboBox_pop_pTr_colorMode.addItem("RGB")
#Model summary to textBrowser_pop_pTr_modelSummary
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
self.popup_trainability_ui.textBrowser_pop_pTr_modelSummary.setText(summary)
#Work on the tableWidget_pop_pTr_layersTable
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
nr_layers = len(index) #total nr. of dense and conv layers with parameters
for rowNumber in range(nr_layers):
layerindex = index[rowNumber]
columnPosition = 0
layer = self.model_keras.layers[layerindex]
rowPosition = self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.rowCount()
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.insertRow(rowPosition)
Name = layer.name
item = QtWidgets.QTableWidgetItem(Name)
item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
item.setTextAlignment(QtCore.Qt.AlignCenter) # change the alignment
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition , columnPosition, item ) #
columnPosition = 1
layer_type = layer.__class__.__name__
item = QtWidgets.QTableWidgetItem(layer_type)
item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
item.setTextAlignment(QtCore.Qt.AlignCenter) # change the alignment
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition , columnPosition, item ) #
columnPosition = 2
Params = layer.count_params()
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, Params)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition, columnPosition, item)
columnPosition = 3
if layer_type == "Dense":
split_property = "units" #'units' are the number of nodes in dense layers
elif layer_type == "Conv2D":
split_property = "filters"
else:
print("other splitprop!")
return
layer_config = layer.get_config()
nr_units = layer_config[split_property] #units are either nodes or filters for dense and convolutional layer, respectively
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, int(nr_units))
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition, columnPosition, item)
columnPosition = 4
#for each item create a spinbopx (trainability)
spinb = QtWidgets.QDoubleSpinBox(self.popup_trainability_ui.tableWidget_pop_pTr_layersTable)
spinb.setMinimum(0)
spinb.setMaximum(1)
spinb.setSingleStep(0.1)
trainability = int(layer.trainable) #.trainable actually returns True or False. Make it integer
spinb.setValue(trainability) #this should be always 1
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setCellWidget(rowPosition, columnPosition, spinb)
self.popup_trainability.show()
#self.popup_trainability_ui.pushButton_pop_pTr_reset.clicked.connect(self.pop_pTr_reset)
self.popup_trainability_ui.pushButton_pop_pTr_update.clicked.connect(self.pop_pTr_update_2)
self.popup_trainability_ui.pushButton_pop_pTr_ok.clicked.connect(self.pop_pTr_ok)
###############Functions for the partial trainability popup################
def pop_pTr_reset(self):
#Reset the model to initial state, with partial trainability
print("Not implemented yet")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>Not implemented yet.</p></body></html>")
msg.setWindowTitle("Not implemented")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def pop_pTr_update_1(self):#main worker function
#Apply the requested changes and display updated model in table
pTr_table = self.popup_trainability_ui.tableWidget_pop_pTr_layersTable
#Read the table:
Layer_names,Layer_trainabilities = [],[]
rowCount = pTr_table.rowCount()
for row in range(rowCount):
#Layer_indices.append(str(pTr_table.item(row, 0).text()))
Layer_names.append(str(pTr_table.item(row, 0).text()))
Layer_trainabilities.append(float(pTr_table.cellWidget(row, 4).value()))
Layer_trainabilities = np.array(Layer_trainabilities)
#What are the current trainability statuses of the model
Layer_trainabilities_orig = np.array([self.model_keras.get_layer(l_name).trainable for l_name in Layer_names])
diff = abs( Layer_trainabilities - Layer_trainabilities_orig )
ind = np.where( diff>0 )[0]
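#only layers whose requested trainability differs from the current state need to be updated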
#Where do we have a trainability between 0 and 1
#ind = np.where( (Layer_trainabilities>0) & (Layer_trainabilities<1) )[0]
if len(ind)>0:
Layer_trainabilities = list(Layer_trainabilities[ind])
Layer_names = list(np.array(Layer_names)[ind])
#Update the model using user-specified trainabilities
self.model_keras = partial_trainability(self.model_keras,Layer_names,Layer_trainabilities)
#Update lineEdit_partialTrainability
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
Layer_train_status = [self.model_keras.layers[layerindex].trainable for layerindex in index]
self.lineEdit_partialTrainability.setText(str(Layer_train_status))#show the trainability status of each layer
else:
print("Nothing to do. All trainabilities are either 0 or 1")
def pop_pTr_update_2(self):#call pop_pTr_update_1 to do the work and then update the window
try:
self.pop_pTr_update_1()#Change the model on self.model_keras according to the table
self.partialTrainability()#Update the popup window by calling the partialTrainability function
except Exception as e:
#There is an issue building the model!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error occured when building model:")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
def pop_pTr_ok(self):
self.pop_pTr_update_1()#Change the model on self.model_keras according to the table; if 'Update' was used before, the work is not repeated and the model is used as it is
#To make the model accessible, it has to be saved to a new .model file
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save model', Default_dict["Path of last model"],"AIDeveloper model file (*.model)")
filename = filename[0]
path, fname = os.path.split(filename)
if len(fname)==0:
return
#add the suffix .model
if not fname.endswith(".model"):
fname = fname +".model"
filename = os.path.join(path,fname)
self.model_keras.save(filename)
#Activate 'load and restart' and put this file
#Avoid the automatic popup
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(True)
self.lineEdit_LoadModelPath.setText(filename)#put the filename in the lineedit
#Destroy the window
self.popup_trainability = None
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(tooltips["modelsaved_success"])
msg.setWindowTitle("Sucessfully created and selected model")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def lossW_comboB(self,state_nr,listindex):
if listindex==-1:
ui_item = self.popup_lossW_ui
else:
ui_item = self.fittingpopups_ui[listindex].popup_lossW_ui
state_str = ui_item.comboBox_lossW.itemText(int(state_nr))
rows_nr = int(ui_item.tableWidget_lossW.rowCount())
if rows_nr==0:
state_str = "None"
if state_str=="None":
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(False)
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setValue(1.0)
elif state_str=="Custom":
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(True)
elif state_str=="Balanced":
#How many cells in total per epoch
events_epoch = [int(ui_item.tableWidget_lossW.item(rowPos,2).text()) for rowPos in range(rows_nr)]
classes = [int(ui_item.tableWidget_lossW.item(rowPos,0).text()) for rowPos in range(rows_nr)]
counter = {}
for i in range(len(classes)):
counter[classes[i]]=events_epoch[i]
max_val = float(max(counter.values()))
class_weights = {class_id : max_val/num_images for class_id, num_images in counter.items()}
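#hedged example (hypothetical counts): counter = {0: 1000, 1: 250} -> class_weights = {0: 1.0, 1: 4.0}, i.e. rarer classes get proportionally larger loss weights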
class_weights = list(class_weights.values())
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(False)
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setValue(class_weights[rowPos])
def lossW_ok(self,listindex):
#This happens when the user presses the OK button on the popup for
#custom loss weights
if listindex==-1:
ui_item = self
else:
ui_item = self.fittingpopups_ui[listindex]
#Which option was used on comboBox_lossW?
state_str = ui_item.popup_lossW_ui.comboBox_lossW.currentText()
if state_str=="None":#User left None. This actually means its off
ui_item.lineEdit_lossW.setText("")
ui_item.pushButton_lossW.setEnabled(False)
ui_item.checkBox_lossW.setChecked(False)
elif state_str=="Custom":#User left None. This actually means its off
#There are custom values
#Read the loss values on the table
rows_nr = int(ui_item.popup_lossW_ui.tableWidget_lossW.rowCount())
classes = [int(ui_item.popup_lossW_ui.tableWidget_lossW.item(rowPos,0).text()) for rowPos in range(rows_nr)]
loss_weights = [float(ui_item.popup_lossW_ui.tableWidget_lossW.cellWidget(rowPos,4).value()) for rowPos in range(rows_nr)]
counter = {}
for i in range(len(classes)):
counter[classes[i]]=loss_weights[i]
#Put counter (its a dictionary) to lineedit
ui_item.lineEdit_lossW.setText(str(counter))
elif state_str=="Balanced":#Balanced, the values are computed later fresh, even when user changes the cell-numbers again
ui_item.lineEdit_lossW.setText("Balanced")
#Destroy the window
ui_item.popup_lossW = None
def lossW_cancel(self,listindex):
#This happens when the user presses the Cancel button on the popup for
#custom loss weights
if listindex==-1:
ui_item = self
else:
ui_item = self.fittingpopups_ui[listindex]
if ui_item.lineEdit_lossW.text()=="":
#if state_str=="None":#User left None. This actually means its off
ui_item.lineEdit_lossW.setText("")
ui_item.pushButton_lossW.setEnabled(False)
ui_item.checkBox_lossW.setChecked(False)
ui_item.popup_lossW = None
return
#Destroy the window
ui_item.popup_lossW = None
def get_norm_from_manualselection(self):
norm = self.comboBox_w.currentText()
index = self.comboBox_Normalization.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_Normalization.setCurrentIndex(index)
self.w.close()
def popup_normalization(self):
self.w = MyPopup()
self.gridLayout_w = QtWidgets.QGridLayout(self.w)
self.gridLayout_w.setObjectName(_fromUtf8("gridLayout"))
self.verticalLayout_w = QtWidgets.QVBoxLayout()
self.verticalLayout_w.setObjectName(_fromUtf8("verticalLayout"))
self.label_w = QtWidgets.QLabel(self.w)
self.label_w.setAlignment(QtCore.Qt.AlignCenter)
self.label_w.setObjectName(_fromUtf8("label_w"))
self.verticalLayout_w.addWidget(self.label_w)
self.horizontalLayout_2_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_2_w.setObjectName(_fromUtf8("horizontalLayout_2"))
self.pushButton_w = QtWidgets.QPushButton(self.w)
self.pushButton_w.setObjectName(_fromUtf8("pushButton"))
self.horizontalLayout_2_w.addWidget(self.pushButton_w)
self.horizontalLayout_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_w.setObjectName(_fromUtf8("horizontalLayout"))
self.label_2_w = QtWidgets.QLabel(self.w)
self.label_2_w.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_2_w.setObjectName(_fromUtf8("label_2_w"))
self.horizontalLayout_w.addWidget(self.label_2_w)
self.comboBox_w = QtWidgets.QComboBox(self.w)
self.comboBox_w.setObjectName(_fromUtf8("comboBox"))
self.comboBox_w.addItems(["Select"]+self.norm_methods)
self.comboBox_w.setMinimumSize(QtCore.QSize(200,22))
self.comboBox_w.setMaximumSize(QtCore.QSize(200, 22))
width=self.comboBox_w.fontMetrics().boundingRect(max(self.norm_methods, key=len)).width()
self.comboBox_w.view().setFixedWidth(width+10)
self.comboBox_w.currentIndexChanged.connect(self.get_norm_from_manualselection)
self.horizontalLayout_w.addWidget(self.comboBox_w)
self.horizontalLayout_2_w.addLayout(self.horizontalLayout_w)
self.verticalLayout_w.addLayout(self.horizontalLayout_2_w)
self.gridLayout_w.addLayout(self.verticalLayout_w, 0, 0, 1, 1)
self.w.setWindowTitle("Select normalization method")
self.label_w.setText("You are about to continue training a pretrained model\n"
"Please select the meta file of that model to load the normalization method\n"
"or choose the normalization method manually")
self.pushButton_w.setText("Load meta file")
self.label_2_w.setText("Manual \n"
"selection")
#one button that allows to load a meta file containing the norm-method
self.pushButton_w.clicked.connect(self.get_norm_from_modelparafile)
self.w.show()
def action_preview_model(self,enabled):#function runs when radioButton_LoadRestartModel or radioButton_LoadContinueModel was clicked
if enabled:
#if the "Load and restart" radiobutton was clicked:
if self.radioButton_LoadRestartModel.isChecked():
modelname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open model architecture', Default_dict["Path of last model"],"Architecture or model (*.arch *.model)")
modelname = modelname[0]
#modelname_for_dict = modelname
#if the "Load and continue" radiobutton was clicked:
elif self.radioButton_LoadContinueModel.isChecked():
modelname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open model with all parameters', Default_dict["Path of last model"],"Keras model (*.model)")
modelname = modelname[0]
#modelname_for_dict = modelname
self.lineEdit_LoadModelPath.setText(modelname) #Put the filename to the line edit
#Remember the location for next time
if len(str(modelname))>0:
Default_dict["Path of last model"] = os.path.split(modelname)[0]
aid_bin.save_aid_settings(Default_dict)
#If user wants to load and restart a model
if self.radioButton_LoadRestartModel.isChecked():
#load the model and print summary
if modelname.endswith(".arch"):
json_file = open(modelname, 'r')
model_config = json_file.read()
json_file.close()
model_config = json.loads(model_config)
#cut the .json off
modelname = modelname.split(".arch")[0]
#Or a .model (FULL model with trained weights) , but for display only load the architecture
elif modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
#model = model_from_config(model_config)
modelname = modelname.split(".model")[0]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
text1 = "Architecture: loaded from .arch\nWeights: will be randomly initialized'\n"
#Try to find the corresponding .meta
#All models have a number:
metaname = modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
if chosen_model is not None:
#chosen_model is a string that should be contained in comboBox_ModelSelection
index = self.comboBox_ModelSelection.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
else:
index = self.comboBox_ModelSelection.findText('None', QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
#Otherwise, user wants to load and continue training a model
elif self.radioButton_LoadContinueModel.isChecked():
#User can only choose a .model (FULL model with trained weights) , but for display only load the architecture
if modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
#model = model_from_config(model_config)
modelname = modelname.split(".model")[0]
#Try to find the corresponding .meta
#All models have a number:
metaname = modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
if chosen_model is not None:
#chosen_model is a string that should be contained in comboBox_ModelSelection
index = self.comboBox_ModelSelection.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
else:
index = self.comboBox_ModelSelection.findText('None', QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
text1 = "Architecture: loaded from .model\nWeights: pretrained weights will be loaded and used when hitting button 'Initialize model!'\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
#In both cases (restart or continue) the input dimensions have to fit
#The number of output classes should also fit but this is not essential
#but most users certainly want the same number of classes (output)->Give Info
try: #Sequential Model
in_dim = model_config['config'][0]['config']['batch_input_shape']
except: #Functional Api
in_dim = model_config['config']["layers"][0]["config"]["batch_input_shape"]
try: #Sequential Model
out_dim = model_config['config'][-2]['config']['units']
except: #Functional Api
out_dim = model_config['config']["layers"][-2]["config"]["units"]
#
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
#Retrieve the color_mode from the model (nr. of channels in last in_dim)
channels = in_dim[-1] #TensorFlow: channels in last dimension
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: loaded Model takes: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked_no_rtdc_ds()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
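#class labels are zero-indexed, so the largest label + 1 equals the required number of output nodes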
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
#aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
text = text1+text2+text3
self.textBrowser_Info.setText(text)
if self.radioButton_LoadContinueModel.isChecked():
#"Load the parameter file of the model that should be continued and apply the same normalization"
#Make a popup: You are about to continue to train a pretrained model
#Please select the parameter file of that model to load the normalization method
#or choose the normalization method manually:
#this is important
self.popup_normalization()
def get_metrics(self,nr_classes):
Metrics = []
f1 = bool(self.checkBox_expertF1.isChecked())
if f1==True:
Metrics.append("f1_score")
precision = bool(self.checkBox_expertPrecision.isChecked())
if precision==True:
Metrics.append("precision")
recall = bool(self.checkBox_expertRecall.isChecked())
if recall==True:
Metrics.append("recall")
metrics = ['accuracy'] + Metrics
metrics = aid_dl.get_metrics_tensors(metrics,nr_classes)
return metrics
def action_set_modelpath_and_name(self):
#Get the path and filename for the new model
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save model', Default_dict["Path of last model"],"Keras Model file (*.model)")
filename = filename[0]
if len(filename)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid filename was chosen.")
msg.setWindowTitle("No valid filename was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if filename.endswith(".arch"):
filename = filename.split(".arch")[0]
#add the suffix .model
if not filename.endswith(".model"):
filename = filename +".model"
self.lineEdit_modelname.setText(filename)
#Write to Default_dict
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def get_dataOverview(self):
table = self.tableWidget_Info
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
rows = table.rowCount()
tmp_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
tmp_df.iloc[i, j] = table.item(i, j).text()
except:
tmp_df.iloc[i, j] = np.nan
return tmp_df
def action_initialize_model(self,duties="initialize_train"):
"""
duties: which tasks should be performed: "initialize", "initialize_train", "initialize_lrfind"
"""
#print("duties: "+str(duties))
#Create config (define which device to use)
if self.radioButton_cpu.isChecked():
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
# try:
# K.clear_session()
# except:
# print("Could not clear_session (7)")
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#Initialize the model
#######################Load and restart model##########################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
text0 = "Loaded model: "+load_modelname
#load the model and print summary
if load_modelname.endswith(".arch"):
json_file = open(load_modelname, 'r')
model_config = json_file.read()
json_file.close()
model_keras = model_from_json(model_config)
model_config = json.loads(model_config)
text1 = "\nArchitecture: loaded from .arch\nWeights: randomly initialized\n"
#Or a .model (FULL model with trained weights) , but for display only load the architecture
elif load_modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(load_modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
model_keras = model_from_config(model_config)
text1 = "\nArchitecture: loaded from .model\nWeights: randomly initialized\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
try:
metaname = load_modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
except:
chosen_model = str(self.comboBox_ModelSelection.currentText())
#In both cases (restart or continue) the input dimensions have to fit
#The number of output classes should also fit but this is not essential
#but most users certainly want the same number of classes (output)->Give Info
try: #Sequential Model
in_dim = model_config['config'][0]['config']['batch_input_shape']
except: #Functional Api
in_dim = model_config['config']["layers"][0]["config"]["batch_input_shape"]
try: #Sequential Model
out_dim = model_config['config'][-2]['config']['units']
except: #Functional Api
out_dim = model_config['config']["layers"][-2]["config"]["units"]
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
channels = in_dim[-1] #TensorFlow: channels in last dimension
#Compile model (consider user-specific metrics)
model_metrics = self.get_metrics(out_dim)
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#compile with default loss/optimizer for now; expert settings will follow and the model will be recompiled
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
###############Load and continue training the model####################
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
text0 = "Loaded model: "+load_modelname+"\n"
#User can only choose a .model (FULL model with trained weights) , but for display only load the architecture
if load_modelname.endswith(".model"):
#Load the full model
try:
model_keras = load_model(load_modelname,custom_objects=aid_dl.get_custom_metrics())
except:
K.clear_session() #On Linux an error occasionally occurred if another fitting ran before
model_keras = load_model(load_modelname,custom_objects=aid_dl.get_custom_metrics())
#model_config = model_keras.config() #Load the model config (this is the architecture)
#load_modelname = load_modelname.split(".model")[0]
text1 = "Architecture: loaded from .model\nWeights: pretrained weights were loaded\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
try:
metaname = load_modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
chosen_model = str(self.comboBox_ModelSelection.currentText())
except:
chosen_model = str(self.comboBox_ModelSelection.currentText())
#Check input dimensions
#The number of output classes should also fit but this is not essential
#but most users certainly want the same number of classes (output)->Give Info
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
in_dim = model_keras.get_input_shape_at(0)
out_dim = model_keras.get_output_shape_at(0)[1]
channels = in_dim[-1] #TensorFlow: channels in last dimension
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
###########################New model###################################
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
text0 = load_modelname
#Create a new model!
#Get what the user wants from the dropdown menu!
chosen_model = str(self.comboBox_ModelSelection.currentText())
if chosen_model==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
in_dim = int(self.spinBox_imagecrop.value())
SelectedFiles = self.items_clicked()
#rtdc_ds = SelectedFiles[0]["rtdc_ds"]
if str(self.comboBox_GrayOrRGB.currentText())=="Grayscale":
channels=1
elif str(self.comboBox_GrayOrRGB.currentText())=="RGB":
channels=3
indices = [s["class"] for s in SelectedFiles]
indices_unique = np.unique(np.array(indices))
if len(indices_unique)<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Need at least two classes to fit. Please specify .rtdc files and corresponding indeces")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
out_dim = np.max(indices)+1
nr_classes = out_dim
if chosen_model=="None":
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
try:
model_keras = model_zoo.get_model(chosen_model,in_dim,channels,out_dim)
except Exception as e:
#There is an issue building the model!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error occured when building model:")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
text1 = "Architecture: created "+chosen_model+" design\nWeights: Initialized random weights\n"
if self.get_color_mode()=="Grayscale":
channels = 1
channel_text = "1 channel (Grayscale)"
elif self.get_color_mode()=="RGB":
channels = 3
channel_text = "3 channels (RGB)"
text2 = "Model Input: "+str(in_dim)+" x "+str(in_dim) + " pixel images and "+channel_text+"\n"
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
else:
#No radio-button was chosen
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please use the radiobuttons to define the model")
msg.setWindowTitle("No model defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#If expert mode is on, apply the requested options
#This affects learning rate, trainability of layers and dropout rate
expert_mode = bool(self.groupBox_expertMode.isChecked())
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
learning_rate_expert_on = bool(self.groupBox_learningRate.isChecked())
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
loss_expert_on = bool(self.checkBox_expt_loss.isChecked())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert_on = bool(self.checkBox_optimizer.isChecked())
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.optimizer_settings.copy() #get the current optimizer settings
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
model_metrics = self.get_metrics(nr_classes)
if "collection" in chosen_model.lower():
for m in model_keras[1]: #in a collection, model_keras[0] are the names of the models and model_keras[1] is a list of all models
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,self.get_metrics(nr_classes),nr_classes)
if not "collection" in chosen_model.lower():
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no squ.brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
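#e.g. a lineEdit text of "0.2,0.5" becomes "[0.2,0.5]" and then the list [0.2, 0.5]; a single "0.3" yields [0.3]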
if type(model_keras)==tuple:#when user chose a Collection of models, a tuple is returned by get_model
collection = True
else:
collection = False
if collection==False: #if there is a single model:
#Original learning rate (before expert mode is switched on!)
try:
self.learning_rate_original = K.eval(model_keras.optimizer.lr)
except:
print("Session busy. Try again in fresh session...")
#tf.reset_default_graph() #Make sure to start with a fresh session
K.clear_session()
sess = tf.Session(graph = tf.Graph(), config=config_gpu)
#K.set_session(sess)
self.learning_rate_original = K.eval(model_keras.optimizer.lr)
#Get initial trainability states of model
self.trainable_original, self.layer_names = aid_dl.model_get_trainable_list(model_keras)
trainable_original, layer_names = self.trainable_original, self.layer_names
self.do_list_original = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
do_list_original = self.do_list_original
if collection==True: #if there is a collection of models:
#Original learning rate (before expert mode is switched on!)
self.learning_rate_original = [K.eval(model_keras[1][i].optimizer.lr) for i in range(len(model_keras[1]))]
#Get initial trainability states of model
trainable_layerName = [aid_dl.model_get_trainable_list(model_keras[1][i]) for i in range(len(model_keras[1]))]
self.trainable_original = [trainable_layerName[i][0] for i in range(len(trainable_layerName))]
self.layer_names = [trainable_layerName[i][1] for i in range(len(trainable_layerName))]
trainable_original, layer_names = self.trainable_original, self.layer_names
self.do_list_original = [aid_dl.get_dropout(model_keras[1][i]) for i in range(len(model_keras[1]))]#Get a list of dropout values of the current model
do_list_original = self.do_list_original
#TODO add expert mode ability for collection of models. Maybe define self.model_keras as a list in general. So, fitting a single model is just a special case
if expert_mode==True:
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
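#e.g. 10 layers with parameters and train_last_layers_n=2 -> [False]*8+[True]*2: only the last two layers stay trainable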
aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,out_dim,loss_expert,optimizer_settings,learning_rate_const)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are Dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,out_dim,loss_expert,optimizer_settings,learning_rate_const)
if dropout_expert_on==True:
#The user apparently wants to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a float
dropout_expert_list=len(do_list)*dropout_expert #convert to list
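#e.g. dropout_expert=[0.3] and 4 dropout layers in the model -> dropout_expert_list=[0.3, 0.3, 0.3, 0.3]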
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers")
msg.setWindowTitle("Issue with Expert->Dropout")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
dropout_expert_list = []
return
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Could not understand user input at Expert->Dropout")
msg.setWindowTitle("Issue with Expert->Dropout")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model are not equal to the rates requested by the user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
text_updates = ""
#Learning Rate: Compare current lr and the lr on expert tab:
if collection == False:
    lr_current = K.eval(model_keras.optimizer.lr)
else:
    lr_current = K.eval(model_keras[1][0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6: #If there is a difference, change lr accordingly
    if collection == False:
        K.set_value(model_keras.optimizer.lr, learning_rate_const)
    else: #for a collection, model_keras[1] holds the list of models
        for m in model_keras[1]:
            K.set_value(m.optimizer.lr, learning_rate_const)
text_updates += "Learning rate: "+str(lr_current)+"\n"
recompile = False
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
else:
optimizer_current = aid_dl.get_optimizer_name(model_keras[1][0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Optimizer: "+optimizer_expert+"\n"
#Loss function: Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
if collection==True:
if model_keras[1][0].loss!=loss_expert:
recompile = True
text_updates += "Loss function: "+loss_expert+"\n"
if recompile==True:
if collection==False:
print("Recompiling...")
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras[1]:
print("Recompiling...")
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
self.model_keras = model_keras #overwrite the model in self
if collection == False:
#Get user-specified filename for the new model
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)>0:
text_new_modelname = "Model will be saved as: "+new_modelname+"\n"
else:
text_new_modelname = "Please specify a model path (name for the model to be fitted)\n"
if collection == True:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)>0:
new_modelname = os.path.split(new_modelname)
text_new_modelname = "Collection of Models will be saved into: "+new_modelname[0]+"\n"
else:
text_new_modelname = "Please specify a model path (name for the model to be fitted)\n"
#Info about normalization method
norm = str(self.comboBox_Normalization.currentText())
text4 = "Input image normalization method: "+norm+"\n"
#Check if there are dropout layers:
#do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
if len(do_list_original)>0:
text4 = text4+"Found "+str(len(do_list_original)) +" dropout layers with rates: "+str(do_list_original)+"\n"
else:
text4 = text4+"Found no dropout layers\n"
if expert_mode==True:
if dropout_expert_on:
text4 = text4+text_do+"\n"
# if learning_rate_expert_on==True:
# if K.eval(model_keras.optimizer.lr) != learning_rate_const: #if the learning rate in UI is NOT equal to the lr of the model...
# text_lr = "Changed the learning rate to: "+ str(learning_rate_const)+"\n"
# text4 = text4+text_lr
text5 = "Model summary:\n"
summary = []
if collection==False:
model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text_new_modelname+text0+text1+text2+text3+text4+text_updates+text5+summary
self.textBrowser_Info.setText(text)
#Save the model architecture: serialize to JSON
model_json = model_keras.to_json()
with open(new_modelname.split(".model")[0]+".arch", "w") as json_file:
json_file.write(model_json)
elif collection==True:
if self.groupBox_expertMode.isChecked()==True:
self.groupBox_expertMode.setChecked(False)
print("Turned off expert mode. Not implemented yet for collections of models. This does not affect user-specified metrics (precision/recall/f1)")
self.model_keras_arch_path = [new_modelname[0]+os.sep+new_modelname[1].split(".model")[0]+"_"+model_keras[0][i]+".arch" for i in range(len(model_keras[0]))]
for i in range(len(model_keras[1])):
model_keras[1][i].summary(print_fn=summary.append)
#Save the model architecture: serialize to JSON
model_json = model_keras[1][i].to_json()
with open(self.model_keras_arch_path[i], "w") as json_file:
json_file.write(model_json)
summary = "\n".join(summary)
text = text_new_modelname+text0+text1+text2+text3+text4+text_updates+text5+summary
self.textBrowser_Info.setText(text)
#Save the model to a variable on self
self.model_keras = model_keras
#Get the user-defined cropping size
crop = int(self.spinBox_imagecrop.value())
#Make the cropsize a bit larger since the images will later be rotated
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
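#e.g. crop=32 -> sqrt(32**2+32**2)=45.25 -> cropsize2=46, so a worst-case 45 degree rotation still fits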
#Estimate RAM needed
nr_imgs = np.sum([np.array(list(SelectedFiles)[i]["nr_images"]) for i in range(len(list(SelectedFiles)))])
ram_needed = np.round(nr_imgs * aid_bin.calc_ram_need(cropsize2),2)
if duties=="initialize":#Stop here if the model just needs to be intialized (for expert mode->partial trainability)
return
elif duties=="initialize_train":
#Tell the user if the data is stored and read from ram or not
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>Should the model only be initialized,\
or do you want to start fitting right after? For fitting, data will\
be loaded to RAM (since Edit->Data to RAM is enabled), which will\
require "+str(ram_needed)+"MB of RAM.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Initialize model or initialize and fit model?")
msg.addButton(QtWidgets.QPushButton('Stop after model initialization'), QtWidgets.QMessageBox.RejectRole)
msg.addButton(QtWidgets.QPushButton('Start fitting'), QtWidgets.QMessageBox.ApplyRole)
retval = msg.exec_()
elif duties=="initialize_lrfind":
retval = 1
else:
print("Invalid duties: "+duties)
return
if retval==0: #yes role: Only initialize model
print("Closing session")
del model_keras
sess.close()
return
elif retval == 1:
if self.actionDataToRam.isChecked():
color_mode = self.get_color_mode()
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
#Check if there is data already available in RAM
if len(self.ram)==0:#if there is no data stored in RAM yet
print("No data on RAM. I have to load")
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
else:
print("There is already some data on RAM")
new_fileinfo = {"SelectedFiles":list(SelectedFiles),"cropsize2":cropsize2,"zoom_factors":zoom_factors,"zoom_order":zoom_order,"color_mode":color_mode}
identical = aid_bin.ram_compare_data(self.ram,new_fileinfo)
if not identical:
#Load the data
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
if identical:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "Data was loaded before! Should same data be reused? If not, click 'Reload data', e.g. if you altered the Data-table."
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Found data on RAM")
msg.addButton(QtWidgets.QPushButton('Reuse data'), QtWidgets.QMessageBox.YesRole)
msg.addButton(QtWidgets.QPushButton('Reload data'), QtWidgets.QMessageBox.NoRole)
retval = msg.exec_()
if retval==0:
print("Re-use data")
#Re-use same data
elif retval==1:
print("Re-load data")
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
#Finally, activate the 'Fit model' button again
#self.pushButton_FitModel.setEnabled(True)
if duties=="initialize_train":
self.action_fit_model()
if duties=="initialize_lrfind":
self.action_lr_finder()
del model_keras
def action_fit_model_worker(self,progress_callback,history_callback):
#gpu_memory is needed on both paths (it is passed to get_config and shown in the fitting popup below)
gpu_memory = float(self.doubleSpinBox_memory.value())
if self.radioButton_cpu.isChecked():
    gpu_used = False
    deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
    gpu_used = True
    deviceSelected = str(self.comboBox_gpu.currentText())
#Retrieve more Multi-GPU Options from Menubar:
cpu_merge = bool(self.actioncpu_merge.isEnabled())
cpu_relocation = bool(self.actioncpu_relocation.isEnabled())
cpu_weight_merge = bool(self.actioncpu_weightmerge.isEnabled())
#Create config (define which device to use)
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#get an index of the fitting popup
listindex = self.popupcounter-1
#Get user-specified filename for the new model
new_modelname = str(self.lineEdit_modelname.text())
model_keras_path = self.model_keras_path
if type(model_keras_path)==list:
collection = True
#Take the initialized models
model_keras_path = self.model_keras_path
model_keras = [load_model(model_keras_path[i],custom_objects=aid_dl.get_custom_metrics()) for i in range(len(model_keras_path)) ]
model_architecture_names = self.model_keras[0]
print(model_architecture_names)
#self.model_keras = None
else:
collection = False
if deviceSelected=="Multi-GPU" and cpu_weight_merge==True:
with tf.device("/cpu:0"):
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
else:
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
#self.model_keras = None
#Initialize a variable for the parallel model
model_keras_p = None
#Multi-GPU
if deviceSelected=="Multi-GPU":
if collection==False:
print("Adjusting the model for Multi-GPU")
model_keras_p = multi_gpu_model(model_keras, gpus=gpu_nr, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)#indicate the numbers of gpus that you have
if self.radioButton_LoadContinueModel.isChecked():#calling multi_gpu_model resets the weights. Hence, they need to be put in place again
model_keras_p.layers[-2].set_weights(model_keras.get_weights())
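#multi_gpu_model embeds the original (template) model just before the final merge layer, so layers[-2] is expected to be that template model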
elif collection==True:
print("Collection & Multi-GPU is not supported yet")
return
# model_keras_p = []
# for m in model_keras_p:
# print("Adjusting the model for Multi-GPU")
# model_keras_p.append(multi_gpu_model(m, gpus=gpu_nr)) #indicate the numbers of gpus that you have
##############Main function after hitting FIT MODEL####################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras[0].output.shape.dims[1])
#Metrics to be displayed during fitting (real-time)
model_metrics = self.get_metrics(nr_classes)
#Compile model
if collection==False and deviceSelected=="Single-GPU":
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#compile with default loss/optimizer for now; expert settings (if any) will trigger a recompilation below
elif collection==False and deviceSelected=="Multi-GPU":
model_keras_p.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#compile with default loss/optimizer for now; expert settings (if any) will trigger a recompilation below
elif collection==True and deviceSelected=="Single-GPU":
#Switch off the expert tab!
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(False)
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setEnabled(False)
for m in model_keras:
m.compile(loss='categorical_crossentropy',optimizer='adam',metrics=self.get_metrics(nr_classes))#compile with default loss/optimizer for now; expert settings (if any) will trigger a recompilation below
elif collection==True and deviceSelected=="Multi-GPU":
print("Collection & Multi-GPU is not supported yet")
return
#Original learning rate:
#learning_rate_original = self.learning_rate_original#K.eval(model_keras.optimizer.lr)
#Original trainable states of layers with parameters
trainable_original, layer_names = self.trainable_original, self.layer_names
do_list_original = self.do_list_original
#Collect all information about the fitting routine that was user
#defined
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
new_model = self.radioButton_NewModel.isChecked()
chosen_model = str(self.comboBox_ModelSelection.currentText())
crop = int(self.spinBox_imagecrop.value())
color_mode = str(self.comboBox_GrayOrRGB.currentText())
loadrestart_model = self.radioButton_LoadRestartModel.isChecked()
loadcontinue_model = self.radioButton_LoadContinueModel.isChecked()
norm = str(self.comboBox_Normalization.currentText())
nr_epochs = int(self.spinBox_NrEpochs.value())
keras_refresh_nr_epochs = int(self.spinBox_RefreshAfterEpochs.value())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_refresh_nr_epochs = int(self.spinBox_RefreshAfterNrEpochs.value())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
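#e.g. a lineEdit text of "3,9" is parsed into the tuple (3, 9)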
if collection==False:
expert_mode = bool(self.groupBox_expertMode.isChecked())
elif collection==True:
self.groupBox_expertMode.setChecked(False)#setChecked returns None; expert_mode is set explicitly below
print("Expert mode was switched off. Not implemented yet for collections")
expert_mode = False
batchSize_expert = int(self.spinBox_batchSize.value())
epochs_expert = int(self.spinBox_epochs.value())
learning_rate_expert_on = bool(self.groupBox_learningRate.isChecked())
learning_rate_const_on = bool(self.radioButton_LrConst.isChecked())
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
learning_rate_cycLR_on = bool(self.radioButton_LrCycl.isChecked())
try:
cycLrMin = float(self.lineEdit_cycLrMin.text())
cycLrMax = float(self.lineEdit_cycLrMax.text())
except:
cycLrMin = []
cycLrMax = []
cycLrMethod = str(self.comboBox_cycLrMethod.currentText())
#clr_settings = self.fittingpopups_ui[listindex].clr_settings.copy()
cycLrGamma = self.clr_settings["gamma"]
SelectedFiles = self.items_clicked()#to compute cycLrStepSize, the number of training images is needed
cycLrStepSize = aid_dl.get_cyclStepSize(SelectedFiles,self.clr_settings["step_size"],batchSize_expert)
#put clr_settings onto fittingpopup,
self.fittingpopups_ui[listindex].clr_settings = self.clr_settings.copy()#assign a copy. Otherwise values in both dicts are changed when manipulating one dict
#put optimizer_settings onto fittingpopup,
self.fittingpopups_ui[listindex].optimizer_settings = self.optimizer_settings.copy()#assign a copy. Otherwise values in both dicts are changed when manipulating one dict
learning_rate_expo_on = bool(self.radioButton_LrExpo.isChecked())
expDecInitLr = float(self.doubleSpinBox_expDecInitLr.value())
expDecSteps = int(self.spinBox_expDecSteps.value())
expDecRate = float(self.doubleSpinBox_expDecRate.value())
loss_expert_on = bool(self.checkBox_expt_loss.isChecked())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert_on = bool(self.checkBox_optimizer.isChecked())
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.fittingpopups_ui[listindex].optimizer_settings.copy()#make a copy to make sure that changes in the UI are not immediately used
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no square brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
lossW_expert_on = bool(self.checkBox_lossW.isChecked())
lossW_expert = str(self.lineEdit_lossW.text())
#To get the class weights (loss), the SelectedFiles are required
#SelectedFiles = self.items_clicked()
#Check if xtra_data should be used for training
xtra_in = [s["xtra_in"] for s in SelectedFiles]
if len(set(xtra_in))==1:
xtra_in = list(set(xtra_in))[0]
elif len(set(xtra_in))>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
self.fittingpopups_ui[listindex].SelectedFiles = SelectedFiles #save to self. to make it accessible for popup showing loss weights
#Get the class weights. This function runs now the first time in the fitting routine.
#It is possible that the user chose Custom weights and then changed the classes. Hence first check if
#there is a weight for each class available.
class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert,custom_check_classes=True)
if type(class_weight)==list:
#There has been a mismatch between the classes described in class_weight and the classes available in SelectedFiles!
lossW_expert = class_weight[0] #overwrite
class_weight = class_weight[1]
print("class_weight:" +str(class_weight))
print("There has been a mismatch between the classes described in \
Loss weights and the classes available in the selected files! \
Hence, the Loss weights are set to Balanced")
#Get callback for the learning rate scheduling
callback_lr = aid_dl.get_lr_callback(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
#save a dictionary with initial values
lr_dict_original = aid_dl.get_lr_dict(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
if collection==False:
#Create an excel file
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles_df = pd.DataFrame(SelectedFiles)
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df = self.get_dataOverview()
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
elif collection==True:
SelectedFiles_df = pd.DataFrame(SelectedFiles)
Writers = []
#Create excel files
for i in range(len(model_keras_path)):
writer = pd.ExcelWriter(model_keras_path[i].split(".model")[0]+'_meta.xlsx', engine='openpyxl')
Writers.append(writer)
for writer in Writers:
#Used files go to a separate sheet on the MetaFile.xlsx
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df = self.get_dataOverview()
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
###############################Expert Mode values##################
expert_mode_before = False #There was no expert mode used before.
if expert_mode==True:
#activate groupBox_expertMode_pop
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(True)
expert_mode_before = True
#Some settings only need to be changed once, after user clicked apply at next epoch
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are Dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if dropout_expert_on==True:
#The user apparently wants to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a single float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:
text = "Could not understand user input at Expert->Dropout"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model are not equal to the rates requested by the user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do)
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection == False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection == False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
if collection == True:
for m in model_keras:
K.set_value(m.optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
#Check if model has to be compiled again
recompile = False #by default, dont recompile (happens for "Load and continue" training a model)
if new_model==True:
recompile = True
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
if collection==True:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if collection==True:
if model_keras[0].loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True:
print("Recompiling...")
if collection==False:
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras:
aid_dl.model_compile(m, loss_expert, optimizer_settings, learning_rate_const,model_metrics, nr_classes)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to adjust learning rate, loss, optimizer")
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_updates)
#self.model_keras = model_keras #overwrite the model on self
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
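#e.g. ind=[True, False, True] -> np.where(...)[0] gives array([0, 2]), the positions of the training files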
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_epoch_train = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_train]
rtdc_path_train = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_train]
zoom_factors_train = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_train]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_train = [selectedfile["shuffle"] for selectedfile in SelectedFiles_train]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_train])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#read self.ram to new variable ; next clear ram. This is required for multitasking (training multiple models with maybe different data)
DATA = self.ram
if verbose==1:
print("Length of DATA (in RAM) = "+str(len(DATA)))
#clear the ram again if desired
if not self.actionKeep_Data_in_RAM.isChecked():
self.ram = dict()
print("Removed data from self.ram. For further training sessions, data has to be reloaded.")
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles_train)):
#if Data_to_RAM was not enabled:
#if not self.actionDataToRam.isChecked():
if len(DATA)==0: #Here, the entire training set needs to be used! Not only random images!
#random_images=False: iterate over the entire training set (no random sampling here)
gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
# else: #get a similar generator, using the ram-data
# if len(DATA)==0:
# gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False) #Replace true means that individual cells could occur several times
else:
gen_train = aid_img.gen_crop_img_ram(DATA,rtdc_path_train[i],random_images=False) #use the data already in RAM; random_images=False: use all images
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen_train)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "<html><head/><body><p>The standard deviation of your training data is zero! This would lead to division by zero. To avoid this, I will divide by 0.0001 instead.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Std. is zero")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
Para_dict = pd.DataFrame()
def update_para_dict():
#Document changes in the meta-file
Para_dict["AIDeveloper_Version"]=VERSION,
Para_dict["model_zoo_version"]=model_zoo_version,
try:
Para_dict["OS"]=platform.platform(),
Para_dict["CPU"]=platform.processor(),
except:
Para_dict["OS"]="Unknown",
Para_dict["CPU"]="Unknown",
Para_dict["Modelname"]=new_modelname,
Para_dict["Chosen Model"]=chosen_model,
Para_dict["new_model"]=new_model,
Para_dict["loadrestart_model"]=loadrestart_model,
Para_dict["loadcontinue_model"]=loadcontinue_model,
Para_dict["Continued_Fitting_From"]=load_modelname,
Para_dict["Input image size"]=crop,
Para_dict["Color Mode"]=color_mode,
Para_dict["Zoom order"]=zoom_order,
Para_dict["Device"]=deviceSelected,
Para_dict["gpu_used"]=gpu_used,
Para_dict["gpu_memory"]=gpu_memory,
Para_dict["Output Nr. classes"]=nr_classes,
Para_dict["Normalization"]=norm,
Para_dict["Nr. epochs"]=nr_epochs,
Para_dict["Keras refresh after nr. epochs"]=keras_refresh_nr_epochs,
Para_dict["Horz. flip"]=h_flip,
Para_dict["Vert. flip"]=v_flip,
Para_dict["rotation"]=rotation,
Para_dict["width_shift"]=width_shift,
Para_dict["height_shift"]=height_shift,
Para_dict["zoom"]=zoom,
Para_dict["shear"]=shear,
Para_dict["Brightness refresh after nr. epochs"]=brightness_refresh_nr_epochs,
Para_dict["Brightness add. lower"]=brightness_add_lower,
Para_dict["Brightness add. upper"]=brightness_add_upper,
Para_dict["Brightness mult. lower"]=brightness_mult_lower,
Para_dict["Brightness mult. upper"]=brightness_mult_upper,
Para_dict["Gaussnoise Mean"]=gaussnoise_mean,
Para_dict["Gaussnoise Scale"]=gaussnoise_scale,
Para_dict["Contrast on"]=contrast_on,
Para_dict["Contrast Lower"]=contrast_lower,
Para_dict["Contrast Higher"]=contrast_higher,
Para_dict["Saturation on"]=saturation_on,
Para_dict["Saturation Lower"]=saturation_lower,
Para_dict["Saturation Higher"]=saturation_higher,
Para_dict["Hue on"]=hue_on,
Para_dict["Hue delta"]=hue_delta,
Para_dict["Average blur on"]=avgBlur_on,
Para_dict["Average blur Lower"]=avgBlur_min,
Para_dict["Average blur Higher"]=avgBlur_max,
Para_dict["Gauss blur on"]=gaussBlur_on,
Para_dict["Gauss blur Lower"]=gaussBlur_min,
Para_dict["Gauss blur Higher"]=gaussBlur_max,
Para_dict["Motion blur on"]=motionBlur_on,
Para_dict["Motion blur Kernel"]=motionBlur_kernel,
Para_dict["Motion blur Angle"]=motionBlur_angle,
Para_dict["Epoch_Started_Using_These_Settings"]=counter,
Para_dict["expert_mode"]=expert_mode,
Para_dict["batchSize_expert"]=batchSize_expert,
Para_dict["epochs_expert"]=epochs_expert,
Para_dict["learning_rate_expert_on"]=learning_rate_expert_on,
Para_dict["learning_rate_const_on"]=learning_rate_const_on,
Para_dict["learning_rate_const"]=learning_rate_const,
Para_dict["learning_rate_cycLR_on"]=learning_rate_cycLR_on,
Para_dict["cycLrMin"]=cycLrMin,
Para_dict["cycLrMax"]=cycLrMax,
Para_dict["cycLrMethod"] = cycLrMethod,
Para_dict["clr_settings"] = self.fittingpopups_ui[listindex].clr_settings,
Para_dict["learning_rate_expo_on"]=learning_rate_expo_on,
Para_dict["expDecInitLr"]=expDecInitLr,
Para_dict["expDecSteps"]=expDecSteps,
Para_dict["expDecRate"]=expDecRate,
Para_dict["loss_expert_on"]=loss_expert_on,
Para_dict["loss_expert"]=loss_expert,
Para_dict["optimizer_expert_on"]=optimizer_expert_on,
Para_dict["optimizer_expert"]=optimizer_expert,
Para_dict["optimizer_settings"]=optimizer_settings,
Para_dict["paddingMode"]=paddingMode,
Para_dict["train_last_layers"]=train_last_layers,
Para_dict["train_last_layers_n"]=train_last_layers_n,
Para_dict["train_dense_layers"]=train_dense_layers,
Para_dict["dropout_expert_on"]=dropout_expert_on,
Para_dict["dropout_expert"]=dropout_expert,
Para_dict["lossW_expert_on"]=lossW_expert_on,
Para_dict["lossW_expert"]=lossW_expert,
Para_dict["class_weight"]=class_weight,
Para_dict["metrics"]=model_metrics,
#training data cannot be changed during training
if norm == "StdScaling using mean and std of all training data":
#This needs to be saved into Para_dict since it will be required for inference
Para_dict["Mean of training data used for scaling"]=mean_trainingdata,
Para_dict["Std of training data used for scaling"]=std_trainingdata,
if collection==False:
if counter == 0:
Para_dict.to_excel(self.fittingpopups_ui[listindex].writer,sheet_name='Parameters')
else:
Para_dict.to_excel(self.fittingpopups_ui[listindex].writer,sheet_name='Parameters',startrow=self.fittingpopups_ui[listindex].writer.sheets['Parameters'].max_row,header= False)
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH)#change to read/write
try:
self.fittingpopups_ui[listindex].writer.save()
except:
pass
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)#change to only readable
if collection==True:
for i in range(len(Writers)):
Para_dict["Chosen Model"]=model_architecture_names[i],
writer = Writers[i]
if counter==0:
Para_dict.to_excel(Writers[i],sheet_name='Parameters')
else:
Para_dict.to_excel(writer,sheet_name='Parameters',startrow=writer.sheets['Parameters'].max_row,header= False)
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
try:
writer.save()
except:
pass
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #read only
######################Load the Validation Data################################
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
nr_events_epoch_valid = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_valid]
rtdc_path_valid = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_valid]
zoom_factors_valid = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_valid]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_valid = [selectedfile["shuffle"] for selectedfile in SelectedFiles_valid]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_valid])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
############Cropping#####################
X_valid,y_valid,Indices,xtra_valid = [],[],[],[]
for i in range(len(SelectedFiles_valid)):
if not self.actionDataToRam.isChecked():
#Replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else: #get a similar generator, using the ram-data
if len(DATA)==0:
#Replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:
gen_valid = aid_img.gen_crop_img_ram(DATA,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
generator_cropped_out = next(gen_valid)
X_valid.append(generator_cropped_out[0])
#y_valid.append(np.repeat(indices_valid[i],nr_events_epoch_valid[i]))
y_valid.append(np.repeat(indices_valid[i],X_valid[-1].shape[0]))
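#e.g. class index 2 and 150 cropped images -> an array of 150 labels, all equal to 2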
Indices.append(generator_cropped_out[1])
xtra_valid.append(generator_cropped_out[2])
del generator_cropped_out
#Save the validation set (BEFORE normalization!)
#Write to.rtdc files
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
aid_bin.write_rtdc(new_modelname.split(".model")[0]+'_Valid_Data.rtdc',rtdc_path_valid,X_valid,Indices,cropped=save_cropped,color_mode=self.get_color_mode(),xtra_in=xtra_valid)
elif bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
aid_bin.write_rtdc(new_modelname.split(".model")[0]+'_Valid_Data.rtdc',rtdc_path_valid,X_valid,Indices,cropped=save_cropped,color_mode=self.get_color_mode(),xtra_in=xtra_valid)
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("Use a different Exporting option in ->Edit if you want to export the data")
# msg.setWindowTitle("Export is turned off!")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
X_valid = np.concatenate(X_valid)
y_valid = np.concatenate(y_valid)
Y_valid = np_utils.to_categorical(y_valid, nr_classes)# * 2 - 1
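#one-hot encoding, e.g. y=2 with nr_classes=4 -> [0, 0, 1, 0]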
xtra_valid = np.concatenate(xtra_valid)
if not bool(self.actionExport_Off.isChecked())==True:
#Save the labels
np.savetxt(new_modelname.split(".model")[0]+'_Valid_Labels.txt',y_valid.astype(int),fmt='%i')
if len(X_valid.shape)==4:
channels=3
elif len(X_valid.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_valid.shape))
if channels==1:
#Add the "channels" dimension
X_valid = np.expand_dims(X_valid,3)
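#e.g. a grayscale stack of shape (N, H, W) becomes (N, H, W, 1)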
#get it to theano image format (channels first)
#X_valid = X_valid.swapaxes(-1,-2).swapaxes(-2,-3)
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(X_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(X_valid,norm)
#Validation data can be cropped to final size already since no augmentation
#will happen on this data set
dim_val = X_valid.shape
print("Current dim. of validation set (pixels x pixels) = "+str(dim_val[2]))
if dim_val[2]!=crop:
print("Change dim. (pixels x pixels) of validation set to = "+str(crop))
remove = int(dim_val[2]/2.0 - crop/2.0)
X_valid = X_valid[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
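#e.g. dim_val[2]=46 and crop=32 -> remove=7, so the slice 7:39 keeps the central 32x32 pixels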
if xtra_in==True:
print("Add Xtra Data to X_valid")
X_valid = [X_valid,xtra_valid]
####################Update the PopupFitting########################
self.fittingpopups_ui[listindex].lineEdit_modelname_pop.setText(new_modelname) #show the model name in the fitting popup
self.fittingpopups_ui[listindex].spinBox_imagecrop_pop.setValue(crop)
self.fittingpopups_ui[listindex].spinBox_NrEpochs.setValue(nr_epochs)
self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.addItems(self.predefined_models)
chosen_model = str(self.comboBox_ModelSelection.currentText())
index = self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].comboBox_Normalization_pop.addItems(self.norm_methods)
index = self.fittingpopups_ui[listindex].comboBox_Normalization_pop.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_Normalization_pop.setCurrentIndex(index)
#padding
index = self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.findText(paddingMode, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.setCurrentIndex(index)
#zoom_order
self.fittingpopups_ui[listindex].comboBox_zoomOrder.setCurrentIndex(zoom_order)
#CPU setting
self.fittingpopups_ui[listindex].comboBox_cpu_pop.addItem("Default CPU")
if gpu_used==False:
self.fittingpopups_ui[listindex].radioButton_cpu_pop.setChecked(True)
self.fittingpopups_ui[listindex].doubleSpinBox_memory_pop.setValue(gpu_memory)
#GPU setting
if gpu_used==True:
self.fittingpopups_ui[listindex].radioButton_gpu_pop.setChecked(True)
self.fittingpopups_ui[listindex].comboBox_gpu_pop.addItem(deviceSelected)
self.fittingpopups_ui[listindex].doubleSpinBox_memory_pop.setValue(gpu_memory)
self.fittingpopups_ui[listindex].spinBox_RefreshAfterEpochs_pop.setValue(keras_refresh_nr_epochs)
self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.setChecked(h_flip)
self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.setChecked(v_flip)
self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.setText(str(rotation))
self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.setText(str(width_shift))
self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.setText(str(height_shift))
self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.setText(str(zoom))
self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.setText(str(shear))
self.fittingpopups_ui[listindex].spinBox_RefreshAfterNrEpochs_pop.setValue(brightness_refresh_nr_epochs)
self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.setValue(brightness_add_lower)
self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.setValue(brightness_add_upper)
self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.setValue(brightness_mult_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.setValue(brightness_mult_upper)
self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.setValue(gaussnoise_mean)
self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.setValue(gaussnoise_scale)
self.fittingpopups_ui[listindex].checkBox_contrast_pop.setChecked(contrast_on)
self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.setValue(contrast_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.setValue(contrast_higher)
self.fittingpopups_ui[listindex].checkBox_saturation_pop.setChecked(saturation_on)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.setValue(saturation_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.setValue(saturation_higher)
self.fittingpopups_ui[listindex].checkBox_hue_pop.setChecked(hue_on)
self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.setValue(hue_delta)
#Special for saturation and hue. Only enabled for RGB:
saturation_enabled = bool(self.checkBox_saturation.isEnabled())
self.fittingpopups_ui[listindex].checkBox_saturation_pop.setEnabled(saturation_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.setEnabled(saturation_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.setEnabled(saturation_enabled)
hue_enabled = bool(self.checkBox_hue.isEnabled())
self.fittingpopups_ui[listindex].checkBox_hue_pop.setEnabled(hue_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.setEnabled(hue_enabled)
self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.setChecked(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].label_avgBlurMin_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.setValue(avgBlur_min)
self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].label_avgBlurMax_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.setValue(avgBlur_max)
self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.setChecked(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].label_gaussBlurMin_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.setValue(gaussBlur_min)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].label_gaussBlurMax_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.setValue(gaussBlur_max)
self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.setChecked(motionBlur_on)
self.fittingpopups_ui[listindex].label_motionBlurKernel_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].label_motionBlurAngle_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setEnabled(motionBlur_on)
if len(motionBlur_kernel)==1:
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setText(str(motionBlur_kernel[0]))
if len(motionBlur_kernel)==2:
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setText(str(motionBlur_kernel[0])+","+str(motionBlur_kernel[1]))
if len(motionBlur_angle)==1:
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setText(str(motionBlur_angle[0]))
if len(motionBlur_angle)==2:
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setText(str(motionBlur_angle[0])+","+str(motionBlur_angle[1]))
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(expert_mode)
self.fittingpopups_ui[listindex].spinBox_batchSize.setValue(batchSize_expert)
self.fittingpopups_ui[listindex].spinBox_epochs.setValue(epochs_expert)
self.fittingpopups_ui[listindex].groupBox_learningRate_pop.setChecked(learning_rate_expert_on)
self.fittingpopups_ui[listindex].radioButton_LrConst.setChecked(learning_rate_const_on)
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.setValue(learning_rate_const)
self.fittingpopups_ui[listindex].radioButton_LrCycl.setChecked(learning_rate_cycLR_on)
self.fittingpopups_ui[listindex].lineEdit_cycLrMin.setText(str(cycLrMin))
self.fittingpopups_ui[listindex].lineEdit_cycLrMax.setText(str(cycLrMax))
index = self.fittingpopups_ui[listindex].comboBox_cycLrMethod.findText(cycLrMethod, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_cycLrMethod.setCurrentIndex(index)
self.fittingpopups_ui[listindex].radioButton_LrExpo.setChecked(learning_rate_expo_on)
self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.setValue(expDecInitLr)
self.fittingpopups_ui[listindex].spinBox_expDecSteps.setValue(expDecSteps)
self.fittingpopups_ui[listindex].doubleSpinBox_expDecRate.setValue(expDecRate)
self.fittingpopups_ui[listindex].checkBox_expt_loss_pop.setChecked(loss_expert_on)
index = self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.findText(loss_expert, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].checkBox_optimizer_pop.setChecked(optimizer_expert_on)
index = self.fittingpopups_ui[listindex].comboBox_optimizer.findText(optimizer_expert, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_optimizer.setCurrentIndex(index)
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.setValue(learning_rate_const)
index = self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.findText(paddingMode, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].checkBox_trainLastNOnly_pop.setChecked(train_last_layers)
self.fittingpopups_ui[listindex].spinBox_trainLastNOnly_pop.setValue(train_last_layers_n)
self.fittingpopups_ui[listindex].checkBox_trainDenseOnly_pop.setChecked(train_dense_layers)
self.fittingpopups_ui[listindex].checkBox_dropout_pop.setChecked(dropout_expert_on)
do_text = [str(do_i) for do_i in dropout_expert]
self.fittingpopups_ui[listindex].lineEdit_dropout_pop.setText((', '.join(do_text)))
self.fittingpopups_ui[listindex].checkBox_lossW.setChecked(lossW_expert_on)
self.fittingpopups_ui[listindex].pushButton_lossW.setEnabled(lossW_expert_on)
self.fittingpopups_ui[listindex].lineEdit_lossW.setText(str(lossW_expert))
if channels==1:
channel_text = "Grayscale"
elif channels==3:
channel_text = "RGB"
self.fittingpopups_ui[listindex].comboBox_colorMode_pop.addItems([channel_text])
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
#Dictionary defining affine image augmentation options:
aug_paras = {"v_flip":v_flip,"h_flip":h_flip,"rotation":rotation,"width_shift":width_shift,"height_shift":height_shift,"zoom":zoom,"shear":shear}
Histories,Index,Saved,Stopwatch,LearningRate = [],[],[],[],[]
if collection==True:
HISTORIES = [ [] for model in model_keras]
SAVED = [ [] for model in model_keras]
counter = 0
saving_failed = False #when saving fails, this becomes true and the user will be informed at the end of training
#Save the initial values (Epoch 1)
update_para_dict()
model_metrics_names = []
for met in model_metrics:
if type(met)==str:
model_metrics_names.append(met)
else:
metname = met.name
metlabel = met.label
if metlabel>0:
metname = metname+"_"+str(metlabel)
model_metrics_names.append(metname)
#Dictionary for records in metrics
model_metrics_records = {}
model_metrics_records["acc"] = 0 #accuracy starts at zero and approaches 1 during training
model_metrics_records["val_acc"] = 0 #accuracy starts at zero and approaches 1 during training
model_metrics_records["loss"] = 9E20 ##loss starts very high and approaches 0 during training
model_metrics_records["val_loss"] = 9E20 ##loss starts very high and approaches 0 during training
for key in model_metrics_names:
if 'precision' in key or 'recall' in key or 'f1_score' in key:
model_metrics_records[key] = 0 #those metrics start at zero and approach 1
model_metrics_records["val_"+key] = 0 #those metrics start at zero and approach 1
gen_train_refresh = False
time_start = time.time()
t1 = time.time() #Initialize a timer; this is used to save the meta file every few seconds
t2 = time.time() #Initialize a timer; this is used update the fitting parameters
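#Main training loop: load a batch of images, apply affine augmentation once, then re-use that
#batch (X_batch_orig) for keras_refresh_nr_epochs x brightness_refresh_nr_epochs epochs, where only
#the cheaper brightness/contrast/blur/noise augmentations are re-applied before each fit.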
while counter < nr_epochs:#nr_epochs: #resample nr_epochs times
#Only keep fitting if the respective window is open:
isVisible = self.fittingpopups[listindex].isVisible()
if isVisible:
############Keras image augmentation#####################
#Start the first iteration:
X_train,y_train,xtra_train = [],[],[]
t3 = time.time()
for i in range(len(SelectedFiles_train)):
if len(DATA)==0 or gen_train_refresh:
#Replace true means that individual cells could occur several times
gen_train = aid_img.gen_crop_img(cropsize2,rtdc_path_train[i],nr_events_epoch_train[i],random_images=shuffle_train[i],replace=True,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
gen_train_refresh = False
else:
gen_train = aid_img.gen_crop_img_ram(DATA,rtdc_path_train[i],nr_events_epoch_train[i],random_images=shuffle_train[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
data_ = next(gen_train)
X_train.append(data_[0])
y_train.append(np.repeat(indices_train[i],X_train[-1].shape[0]))
if xtra_in==True:
xtra_train.append(data_[2])
del data_
X_train = np.concatenate(X_train)
X_train = X_train.astype(np.uint8)
y_train = np.concatenate(y_train)
if xtra_in==True:
print("Retrieve Xtra Data...")
xtra_train = np.concatenate(xtra_train)
t4 = time.time()
if verbose == 1:
print("Time to load data (from .rtdc or RAM) and crop="+str(t4-t3))
if len(X_train.shape)==4:
channels=3
elif len(X_train.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_train.shape))
if channels==1:
#Add the "channels" dimension
X_train = np.expand_dims(X_train,3)
t3 = time.time()
#Some parallelization: use nr_threads (number of CPUs)
nr_threads = 1 #Somehow for MNIST and CIFAR, processing always took longer for nr_threads>1 . I tried nr_threads=2,4,8,16,24
if nr_threads == 1:
X_batch = aid_img.affine_augm(X_train,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear) #Affine image augmentation
y_batch = np.copy(y_train)
else:
#Divide the data into nr_threads batches
X_train = np.array_split(X_train,nr_threads)
y_train = np.array_split(y_train,nr_threads)
self.X_batch = [False] * nr_threads
self.y_batch = [False] * nr_threads
self.counter_aug = 0
self.Workers_augm = []
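#Each worker thread augments one chunk of X_train and stores the result in self.X_batch[i] /
#self.y_batch[i]; self.counter_aug counts finished workers so the main thread can wait for all of them.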
def imgaug_worker(aug_paras,progress_callback,history_callback):
i = aug_paras["i"]
self.X_batch[i] = aid_img.affine_augm(aug_paras["X_train"],v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear)
self.y_batch[i] = aug_paras["y_train"]
self.counter_aug+=1
t3_a = time.time()
for i in range(nr_threads):
aug_paras_ = copy.deepcopy(aug_paras)
aug_paras_["i"] = i
aug_paras_["X_train"]=X_train[i]#augparas contains rotation and so on. X_train and y_train are overwritten in each iteration (for each worker new X_train)
aug_paras_["y_train"]=y_train[i]
self.Workers_augm.append(Worker(imgaug_worker,aug_paras_))
self.threadpool.start(self.Workers_augm[i])
while self.counter_aug < nr_threads:
time.sleep(0.01)#Wait 10 ms, then check the counter again
t3_b = time.time()
if verbose == 1:
print("Time to perform affine augmentation_internal ="+str(t3_b-t3_a))
X_batch = np.concatenate(self.X_batch)
y_batch = np.concatenate(self.y_batch)
Y_batch = np_utils.to_categorical(y_batch, nr_classes)# * 2 - 1
t4 = time.time()
if verbose == 1:
print("Time to perform affine augmentation ="+str(t4-t3))
t3 = time.time()
#Now do the final cropping to the actual size that was set by user
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
t4 = time.time()
# if verbose == 1:
# print("Time to crop to final size="+str(t4-t3))
X_batch_orig = np.copy(X_batch) #save into new array and do some iterations with varying noise/brightness
#reuse this X_batch_orig a few times since this augmentation was costly
keras_iter_counter = 0
while keras_iter_counter < keras_refresh_nr_epochs and counter < nr_epochs:
keras_iter_counter+=1
#if t2-t1>5: #check for changed settings every 5 seconds
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#Another while loop if the user wants to reuse the keras-augmented data
#several times and only apply brightness augmentation:
brightness_iter_counter = 0
while brightness_iter_counter < brightness_refresh_nr_epochs and counter < nr_epochs:
#In each iteration, start with non-augmented data
X_batch = np.copy(X_batch_orig)#copy from X_batch_orig, X_batch will be altered without altering X_batch_orig
X_batch = X_batch.astype(np.uint8)
#########X_batch = X_batch.astype(float)########## No float yet :) !!!
brightness_iter_counter += 1
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
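#If the user ticked 'Apply at next epoch', re-read all augmentation and expert settings
#from the popup UI so they take effect from this epoch onwards.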
if self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.isChecked():
nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_NrEpochs.value())
#Keras stuff
keras_refresh_nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_RefreshAfterEpochs_pop.value())
h_flip = bool(self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.isChecked())
v_flip = bool(self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.isChecked())
rotation = float(self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.text())
width_shift = float(self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.text())
height_shift = float(self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.text())
zoom = float(self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.text())
shear = float(self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.text())
#Brightness stuff
brightness_refresh_nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_RefreshAfterNrEpochs_pop.value())
brightness_add_lower = float(self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.value())
brightness_add_upper = float(self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.value())
brightness_mult_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.value())
brightness_mult_upper = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.value())
gaussnoise_mean = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.value())
gaussnoise_scale = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.value())
contrast_on = bool(self.fittingpopups_ui[listindex].checkBox_contrast_pop.isChecked())
contrast_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.value())
contrast_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.value())
saturation_on = bool(self.fittingpopups_ui[listindex].checkBox_saturation_pop.isChecked())
saturation_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.value())
saturation_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.value())
hue_on = bool(self.fittingpopups_ui[listindex].checkBox_hue_pop.isChecked())
hue_delta = float(self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.value())
avgBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.isChecked())
avgBlur_min = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.value())
avgBlur_max = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.value())
gaussBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.isChecked())
gaussBlur_min = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.value())
gaussBlur_max = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.value())
motionBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.isChecked())
motionBlur_kernel = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.text())
motionBlur_angle = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
#Expert mode stuff
expert_mode = bool(self.fittingpopups_ui[listindex].groupBox_expertMode_pop.isChecked())
batchSize_expert = int(self.fittingpopups_ui[listindex].spinBox_batchSize.value())
epochs_expert = int(self.fittingpopups_ui[listindex].spinBox_epochs.value())
learning_rate_expert_on = bool(self.fittingpopups_ui[listindex].groupBox_learningRate_pop.isChecked())
learning_rate_const_on = bool(self.fittingpopups_ui[listindex].radioButton_LrConst.isChecked())
learning_rate_const = float(self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value())
learning_rate_cycLR_on = bool(self.fittingpopups_ui[listindex].radioButton_LrCycl.isChecked())
try:
cycLrMin = float(self.fittingpopups_ui[listindex].lineEdit_cycLrMin.text())
cycLrMax = float(self.fittingpopups_ui[listindex].lineEdit_cycLrMax.text())
except:
cycLrMin = []
cycLrMax = []
cycLrMethod = str(self.fittingpopups_ui[listindex].comboBox_cycLrMethod.currentText())
clr_settings = self.fittingpopups_ui[listindex].clr_settings.copy() #Get a copy of the current clr_settings; .copy() prevents changes in the UI from taking immediate effect
cycLrStepSize = aid_dl.get_cyclStepSize(SelectedFiles,clr_settings["step_size"],batchSize_expert)
cycLrGamma = clr_settings["gamma"]
learning_rate_expo_on = bool(self.fittingpopups_ui[listindex].radioButton_LrExpo.isChecked())
expDecInitLr = float(self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.value())
expDecSteps = int(self.fittingpopups_ui[listindex].spinBox_expDecSteps.value())
expDecRate = float(self.fittingpopups_ui[listindex].doubleSpinBox_expDecRate.value())
loss_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_expt_loss_pop.isChecked())
loss_expert = str(self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.currentText())
optimizer_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_optimizer_pop.isChecked())
optimizer_expert = str(self.fittingpopups_ui[listindex].comboBox_optimizer.currentText())
optimizer_settings = self.fittingpopups_ui[listindex].optimizer_settings.copy() #Get a copy of the current optimizer_settings; .copy() prevents changes in the UI from taking immediate effect
paddingMode_ = str(self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.currentText())
print("paddingMode_:"+str(paddingMode_))
if paddingMode_ != paddingMode:
print("Changed the padding mode!")
gen_train_refresh = True#otherwise changing paddingMode will not have any effect
paddingMode = paddingMode_
train_last_layers = bool(self.fittingpopups_ui[listindex].checkBox_trainLastNOnly_pop.isChecked())
train_last_layers_n = int(self.fittingpopups_ui[listindex].spinBox_trainLastNOnly_pop.value())
train_dense_layers = bool(self.fittingpopups_ui[listindex].checkBox_trainDenseOnly_pop.isChecked())
dropout_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_dropout_pop.isChecked())
try:
dropout_expert = str(self.fittingpopups_ui[listindex].lineEdit_dropout_pop.text()) #due to the validator, there are no square brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
lossW_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_lossW.isChecked())
lossW_expert = str(self.fittingpopups_ui[listindex].lineEdit_lossW.text())
class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert) #
print("Updating parameter file (meta.xlsx)!")
update_para_dict()
#Changes in expert mode can affect the model: apply changes now:
if expert_mode==True:
if collection==False: #Expert mode is currently not supported for Collections
expert_mode_before = True
#Apply changes to the trainable states:
if train_last_layers==True:#Train only the last n layers
if verbose:
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
#Change the trainability states. Model compilation is done inside model_change_trainability
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if train_dense_layers==True:#Train only dense layers
if verbose:
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are Dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
#Change the trainability states. Model compilation is done inside model_change_trainability
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if dropout_expert_on==True:
#The user apparently wants to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:
text = "Could not understand user input at Expert->Dropout"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model differ from the rates requested by the user...
#Change dropout. Model .compile happens inside change_dropout function
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to changed dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
if verbose:
print(text_do)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do)
if learning_rate_expert_on==True:
#get the current lr_dict
lr_dict_now = aid_dl.get_lr_dict(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
if not lr_dict_now.equals(lr_dict_original):#in case the dataframes don't match...
#generate a new callback
callback_lr = aid_dl.get_lr_callback(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
#update lr_dict_original
lr_dict_original = lr_dict_now.copy()
else:
callback_lr = None
if optimizer_expert_on==True:
optimizer_settings_now = self.fittingpopups_ui[listindex].optimizer_settings.copy()
if not optimizer_settings_now == optimizer_settings:#in case the settings differ...
#grab these new optimizer values
optimizer_settings = optimizer_settings_now.copy()
############################Invert 'expert' settings#########################
if expert_mode==False and expert_mode_before==True: #if the expert mode was selected before, change the parameters back to original values
if verbose:
print("Expert mode was used before and settings are now inverted")
#Re-set trainable states back to original state
if verbose:
print("Change 'trainable' layers back to original state")
summary = aid_dl.model_change_trainability(model_keras,trainable_original,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change 'trainable' layers back to original state")
text1 = "Expert mode turns off: Request for orignal trainability states:\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if verbose:
print("Change dropout rates in dropout layers back to original values")
callback_lr = None#remove learning rate callback
if verbose:
print("Set learning rate callback to None")
if len(do_list_original)>0:
do_changed = aid_dl.change_dropout(model_keras,do_list_original,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout values back to original state. I'm not sure if this works!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to original values: "+str(do_list_original)
else:
text_do = "Dropout rate(s) in model was/were not changed"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do+"\n")
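#Compare the compiled model against the expert-tab settings (learning rate, optimizer, loss)
#and recompile the model(s) if any of them changed.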
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection==False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection==False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
else:
K.set_value(model_keras[0].optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
recompile = False
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
else:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
loss_ = model_keras.loss
else:
loss_ = model_keras[0].loss
if loss_!=loss_expert:
recompile = True
model_metrics_records["loss"] = 9E20 #Reset the record for loss because new loss function could converge to a different min. value
model_metrics_records["val_loss"] = 9E20 #Reset the record for loss because new loss function could converge to a different min. value
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True and collection==False:
print("Recompiling...")
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change optimizer, loss and learninig rate.")
elif recompile==True and collection==True:
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
print("Altering learning rate is not suported for collections (yet)")
return
print("Recompiling...")
for m in model_keras:
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_updates)
#self.model_keras = model_keras #overwrite the model in self
self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.setChecked(False)
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
X_batch = X_batch.astype(np.uint8)
if contrast_on:
t_con_aug_1 = time.time()
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
t_con_aug_2 = time.time()
if verbose == 1:
print("Time to augment contrast="+str(t_con_aug_2-t_con_aug_1))
if saturation_on or hue_on:
t_sat_aug_1 = time.time()
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta) #Gray and RGB; both values >0!
t_sat_aug_2 = time.time()
if verbose == 1:
print("Time to augment saturation/hue="+str(t_sat_aug_2-t_sat_aug_1))
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
t_avgBlur_1 = time.time()
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
t_avgBlur_2 = time.time()
if verbose == 1:
print("Time to perform average blurring="+str(t_avgBlur_2-t_avgBlur_1))
if gaussBlur_on:
t_gaussBlur_1 = time.time()
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
t_gaussBlur_2 = time.time()
if verbose == 1:
print("Time to perform gaussian blurring="+str(t_gaussBlur_2-t_gaussBlur_1))
if motionBlur_on:
t_motionBlur_1 = time.time()
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
t_motionBlur_2 = time.time()
if verbose == 1:
print("Time to perform motion blurring="+str(t_motionBlur_2-t_motionBlur_1))
##########Brightness noise#########
t3 = time.time()
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
t4 = time.time()
if verbose == 1:
print("Time to augment brightness="+str(t4-t3))
t3 = time.time()
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
t4 = time.time()
if verbose == 1:
print("Time to apply normalization="+str(t4-t3))
#Fitting can be paused
while str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())=="":
time.sleep(2) #wait 2 seconds and then check the text on the button again
if verbose == 1:
print("X_batch.shape")
print(X_batch.shape)
if xtra_in==True:
print("Add Xtra Data to X_batch")
X_batch = [X_batch,xtra_train]
#generate a list of callbacks, get empty list if callback_lr is none
callbacks = []
if callback_lr!=None:
callbacks.append(callback_lr)
###################################################
###############Actual fitting######################
###################################################
if collection==False:
if model_keras_p == None:
history = model_keras.fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
elif model_keras_p != None:
history = model_keras_p.fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
Histories.append(history.history)
Stopwatch.append(time.time()-time_start)
learningrate = K.get_value(history.model.optimizer.lr)
LearningRate.append(learningrate)
#Check if any metric broke a record
record_broken = False #initially, assume there is no new record
for key in history.history.keys():
value = history.history[key][-1]
record = model_metrics_records[key]
if 'val_acc' in key or 'val_precision' in key or 'val_recall' in key or 'val_f1_score' in key:
#These metrics should go up (towards 1)
if value>record:
model_metrics_records[key] = value
record_broken = True
print(key+" broke record -> Model will be saved" )
elif 'val_loss' in key:
#This metric should go down (towards 0)
if value<record:
model_metrics_records[key] = value
record_broken = True
print(key+" broke record -> Model will be saved")
#self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
if record_broken:#if any record was broken...
if deviceSelected=="Multi-GPU":#in case of Multi-GPU...
#In case of multi-GPU, first copy the weights of the parallel model to the normal model
model_keras.set_weights(model_keras_p.layers[-2].get_weights())
#Save the model
text = "Save model to following directory: \n"+os.path.dirname(new_modelname)
print(text)
if os.path.exists(os.path.dirname(new_modelname)):
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
text = "Record was broken -> saved model"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:#in case the folder does not exist (anymore), create a folder in temp
#what is the foldername of the model?
text = "Saving failed. Create folder in temp"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
saving_failed = True
temp_path = aid_bin.create_temp_folder()#create a temp folder if it does not already exist
text = "Your temp. folder is here: "+str(temp_path)
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
parentfolder = aid_bin.splitall(new_modelname)[-2]
fname = os.path.split(new_modelname)[-1]
#create that folder in temp if it does not exist already
if not os.path.exists(os.path.join(temp_path,parentfolder)):
text = "Create folder in temp:\n"+os.path.join(temp_path,parentfolder)
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
os.mkdir(os.path.join(temp_path,parentfolder))
#change the new_modelname to a path in temp
new_modelname = os.path.join(temp_path,parentfolder,fname)
#inform user!
text = "Could not find original folder. Files are now saved to "+new_modelname
text = "<span style=\' color: red;\'>" +text+"</span>"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
text = "<span style=\' color: black;\'>" +""+"</span>"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#Save the model
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
text = "Model saved successfully to temp"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#Also update the excel writer!
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
Saved.append(1)
#Also save the model upon user-request
elif bool(self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.isChecked())==True:
if deviceSelected=="Multi-GPU":#in case of Multi-GPU...
#In case of multi-GPU, first copy the weights of the parallel model to the normal model
model_keras.set_weights(model_keras_p.layers[-2].get_weights())
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
Saved.append(1)
self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.setChecked(False)
else:
Saved.append(0)
elif collection==True:
for i in range(len(model_keras)):
#Expert-settings return automatically to default values when Expert-mode is unchecked
history = model_keras[i].fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
HISTORIES[i].append(history.history)
learningrate = K.get_value(history.model.optimizer.lr)
print("model_keras_path[i]")
print(model_keras_path[i])
#Check if any metric broke a record
record_broken = False #initially, assume there is no new record
for key in history.history.keys():
value = history.history[key][-1]
record = model_metrics_records[key]
if 'val_acc' in key or 'val_precision' in key or 'val_recall' in key or 'val_f1_score' in key:
#These metrics should go up (towards 1)
if value>record:
model_metrics_records[key] = value
record_broken = True
text = key+" broke record -> Model will be saved"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#one could 'break' here, but I want to update all records
elif 'val_loss' in key:
#This metric should go down (towards 0)
if value<record:
model_metrics_records[key] = value
record_broken = True
text = key+" broke record -> Model will be saved"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#For collections of models:
if record_broken:
#Save the model
model_keras[i].save(model_keras_path[i].split(".model")[0]+"_"+str(counter)+".model")
SAVED[i].append(1)
elif bool(self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.isChecked())==True:
model_keras[i].save(model_keras_path[i].split(".model")[0]+"_"+str(counter)+".model")
SAVED[i].append(1)
self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.setChecked(False)
else:
SAVED[i].append(0)
callback_progessbar = float(counter)/nr_epochs
progress_callback.emit(100.0*callback_progessbar)
history_emit = history.history
history_emit["LearningRate"] = [learningrate]
history_callback.emit(history_emit)
Index.append(counter)
t2 = time.time()
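#Periodically persist the collected history to meta.xlsx: on the first epoch write the full sheet
#(with header); afterwards append new rows roughly every spinBox_saveMetaEvery seconds.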
if collection==False:
if counter==0:
#If this runs the first time, create the file with header
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model state that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
#If this runs the first time, create the file with header
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(writer,sheet_name='History')
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
meta_saving_t = int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value())
text = "meta.xlsx was saved (automatic saving every "+str(meta_saving_t)+"s)"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#self.fittingpopups_ui[listindex].backup.append({"DF1":DF1})
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
#Save the history to the meta file at a sensible frequency (every spinBox_saveMetaEvery seconds)
elif t2-t1>int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value()):
#elif counter%50==0: #otherwise save the history to excel after each n epochs
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model state that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
#Saving
if os.path.exists(os.path.dirname(new_modelname)):#check if folder is (still) available
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
DF1.to_excel(writer,sheet_name='History', startrow=writer.sheets['History'].max_row,header= False)
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #make read only
meta_saving_t = int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value())
text = "meta.xlsx was saved (automatic saving every "+str(meta_saving_t)+"s to directory:\n)"+new_modelname
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
t1 = time.time()
else:#If folder not available, create a folder in temp
text = "Failed to save meta.xlsx. -> Create folder in temp\n"
saving_failed = True
temp_path = aid_bin.create_temp_folder()#create a temp folder if it does not already exist
text += "Your temp folder is here: "+str(temp_path)+"\n"
folder = os.path.split(new_modelname)[-2]
folder = os.path.split(folder)[-1]
fname = os.path.split(new_modelname)[-1]
#create that folder in temp if it doesn't exist already
if not os.path.exists(os.path.join(temp_path,folder)):
os.mkdir(os.path.join(temp_path,folder))
text +="Created directory in temp:\n"+os.path.join(temp_path,folder)
print(text)
#change the new_modelname to a path in temp
new_modelname = os.path.join(temp_path,folder,fname)
#inform user!
text = "Could not find original folder. Files are now saved to "+new_modelname
text = "<span style=\' color: red;\'>" +text+"</span>"#put red text to the infobox
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
text = "<span style=\' color: black;\'>" +""+"</span>"#reset textcolor to black
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#update the excel writer
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
print("There is already such a file...AID will add new data to it. Please check if this is OK")
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(writer,sheet_name='History')
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
print("meta.xlsx was saved")
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
if collection==True:
if counter==0:
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
#If this runs the first time, create the file with header
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model state that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#If this runs the first time, create the file with header
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(Writers[i],sheet_name='History')
Writers[i].save()
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
print("meta.xlsx was saved")
Index = []#reset the Index list
#Save the history to the meta file at a sensible frequency (every spinBox_saveMetaEvery seconds)
elif t2-t1>int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value()):
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model state that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#Saving
#TODO: save to temp, if harddisk not available to prevent crash.
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
DF1.to_excel(Writers[i],sheet_name='History', startrow=Writers[i].sheets['History'].max_row,header= False)
Writers[i].save()
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #make read only
print("meta.xlsx was saved")
t1 = time.time()
Index = []#reset the Index list
counter+=1
progress_callback.emit(100.0)
#If the original storing locating became inaccessible (folder name changed, HD unplugged...)
#the models and meta are saved to temp folder. Inform the user!!!
if saving_failed==True:
path_orig = str(self.fittingpopups_ui[listindex].lineEdit_modelname_pop.text())
text = "<html><head/><body><p>Original path:<br>"+path_orig+\
"<br>became inaccessible during training! Files were then saved to:<br>"+\
new_modelname.split(".model")[0]+"<br>To bring both parts back together\
, you have to manually open the meta files (Excel) and copy/paste each sheet. \
Sorry for the inconvenience.<br>If that happens often, you may contact \
the main developer and ask him to improve that.</p></body></html>"
text = "<span style=\' font-weight:600; color: red;\'>" +text+"</span>"#put red text to the infobox
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
print('\a')#make a noise
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.setStyleSheet("background-color: yellow;")
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.moveCursor(QtGui.QTextCursor.End)
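#Training finished or was stopped: flush any remaining history entries to the meta file(s)
#and close the Excel writer(s).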
if collection==False:
if len(Histories)>0: #if the list for History files is not empty, process it!
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model state that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
Index = []#reset the Index list
Histories = []#reset the Histories list
Saved = []
#does such a file exist already? append!
if not os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
DF1.to_excel(writer,sheet_name='History')
else: # else it exists so append without writing the header
DF1.to_excel(writer,sheet_name='History', startrow=writer.sheets['History'].max_row,header= False)
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
writer.save()
writer.close()
if collection==True:
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
if len(Histories)>0: #if the list for History files is not empty, process it!
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model state that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#does such a file exist already? append!
if not os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
DF1.to_excel(Writers[i],sheet_name='History')
else: # else it exists so append without writing the header
DF1.to_excel(Writers[i],sheet_name='History', startrow=Writers[i].sheets['History'].max_row,header= False)
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
Writers[i].save()
Writers[i].close()
Index = []#reset the Index list
sess.close()
# try:
# aid_dl.reset_keras(model_keras)
# except:
# pass
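#Prepare fitting: verify a model name and an initialized model, save the model(s) to disk
#(models cannot be passed between threads), optionally offer to move data to RAM, build the
#fitting popup and start action_fit_model_worker in a background thread.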
def action_fit_model(self):
#Take the initialized model
#Unfortunately, in TensorFlow it is not possible to pass a model from
#one thread to another. Therefore I have to load and save the models each time :(
model_keras = self.model_keras
if type(model_keras)==tuple:
collection=True
else:
collection=False
#Check if there was a model initialized:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a path/filename for the model to be fitted!")
msg.setWindowTitle("Model path/ filename missing!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if model_keras==None:#in case the model got deleted in another task
self.action_initialize_model(duties="initialize_train")
print("Had to re-run action_initialize_model!")
model_keras = self.model_keras
self.model_keras = None#delete this copy
if model_keras==None:
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("Model could not be initialized")
# msg.setWindowTitle("Error")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
return
if not model_keras==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model is now initialized for you, Please check Model summary window below if everything is correct and then press Fit again!")
msg.setWindowTitle("No initilized model found!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#There should be at least two outputs (index 0 and 1)
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras[1][0].get_config()#["layers"]
nr_classes = int(model_keras[1][0].output.shape.dims[1])
if nr_classes<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define at least two classes")
msg.setWindowTitle("Not enough classes")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if collection==False:
#define a variable on self which allows the fit_model_worker to load this model and fit
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = new_modelname.split(".model")[0]+"_0.model"
#save a first version of the .model
model_keras.save(self.model_keras_path)
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, I have to reload the model in action_fit_model_worker anyway
if collection==True:
#define a variable on self which allows the fit_model_worker to load this model and fit
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = [new_modelname.split(".model")[0]+"_"+model_keras[0][i]+".model" for i in range(len(model_keras[0]))]
for i in range(len(self.model_keras_path)):
#save a first version of the .model
model_keras[1][i].save(self.model_keras_path[i])
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, I have to reload the model in action_fit_model_worker anyway
#Check that Data is on RAM
DATA_len = len(self.ram) #this returns the len of a dictionary. The dictionary is supposed to contain the training/validation data; otherwise the data is read from .rtdc data directly (SLOW unless you have ultra-good SSD)
def popup_data_to_ram(button):
yes_or_no = button.text()
if yes_or_no == "&Yes":
print("Moving data to ram")
self.actionDataToRamNow_function()
elif yes_or_no == "&No":
pass
if DATA_len==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Would you like transfer the Data to RAM now?\n(Currently the data is not in RAM and would be read from .rtdc, which slows down fitting dramatically unless you have a super-fast SSD.)")
msg.setWindowTitle("Data to RAM now?")
msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
msg.buttonClicked.connect(popup_data_to_ram)
msg.exec_()
###################Popup Window####################################
self.fittingpopups.append(MyPopup())
ui = aid_frontend.Fitting_Ui()
ui.setupUi(self.fittingpopups[-1]) #append the ui to the last element on the list
self.fittingpopups_ui.append(ui)
# Increase the popupcounter by one; this will help to coordinate the data flow between main ui and popup
self.popupcounter += 1
listindex=self.popupcounter-1
##############################Define functions#########################
self.fittingpopups_ui[listindex].pushButton_UpdatePlot_pop.clicked.connect(lambda: self.update_historyplot_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_Stop_pop.clicked.connect(lambda: self.stop_fitting_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_Pause_pop.clicked.connect(lambda: self.pause_fitting_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_saveTextWindow_pop.clicked.connect(lambda: self.saveTextWindow_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_clearTextWindow_pop.clicked.connect(lambda: self.clearTextWindow_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_showModelSumm_pop.clicked.connect(lambda: self.showModelSumm_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_saveModelSumm_pop.clicked.connect(lambda: self.saveModelSumm_pop(listindex))
#Expert mode functions
#self.fittingpopups_ui[listindex].checkBox_pTr_pop.toggled.connect(lambda on_or_off: self.partialtrainability_activated_pop(on_or_off,listindex))
self.fittingpopups_ui[listindex].pushButton_lossW.clicked.connect(lambda: self.lossWeights_popup(listindex))
self.fittingpopups_ui[listindex].checkBox_lossW.clicked.connect(lambda on_or_off: self.lossWeights_activated(on_or_off,listindex))
self.fittingpopups_ui[listindex].Form.setWindowTitle(os.path.split(new_modelname)[1])
self.fittingpopups_ui[listindex].progressBar_Fitting_pop.setValue(0) #set the progress bar to zero
self.fittingpopups_ui[listindex].pushButton_ShowExamleImgs_pop.clicked.connect(lambda: self.action_show_example_imgs_pop(listindex))
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.doubleClicked.connect(lambda item: self.tableWidget_HistoryInfo_pop_dclick(item,listindex))
#Cyclical learning rate extra settings
self.fittingpopups_ui[listindex].pushButton_cycLrPopup.clicked.connect(lambda: self.popup_clr_settings(listindex))
self.fittingpopups_ui[listindex].comboBox_optimizer.currentTextChanged.connect(lambda: self.expert_optimizer_changed(optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].pushButton_LR_plot.clicked.connect(lambda: self.popup_lr_plot(listindex))
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.valueChanged.connect(lambda: self.expert_lr_changed(value=self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value(),optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.valueChanged.connect(lambda: self.expert_lr_changed(value=self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value(),optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].pushButton_optimizer_pop.clicked.connect(lambda: self.optimizer_change_settings_popup(listindex))
worker = Worker(self.action_fit_model_worker)
#Get a signal from the worker to update the progressbar
worker.signals.progress.connect(self.fittingpopups_ui[listindex].progressBar_Fitting_pop.setValue)
#Define a func which prints information during fitting to textbrowser
#And furthermore provide option to do real-time plotting
def real_time_info(dic):
self.fittingpopups_ui[listindex].Histories.append(dic) #append to a list. Will be used for plotting in the "Update plot" function
OtherMetrics_keys = self.fittingpopups_ui[listindex].RealTime_OtherMetrics.keys()
#Append to lists for real-time plotting
self.fittingpopups_ui[listindex].RealTime_Acc.append(dic["acc"][0])
self.fittingpopups_ui[listindex].RealTime_ValAcc.append(dic["val_acc"][0])
self.fittingpopups_ui[listindex].RealTime_Loss.append(dic["loss"][0])
self.fittingpopups_ui[listindex].RealTime_ValLoss.append(dic["val_loss"][0])
keys = list(dic.keys())
#sort keys alphabetically
keys_ = [l.lower() for l in keys]
ind_sort = np.argsort(keys_)
keys = list(np.array(keys)[ind_sort])
#First keys should always be acc,loss,val_acc,val_loss -in this order
keys_first = ["acc","loss","val_acc","val_loss"]
for i in range(len(keys_first)):
if keys_first[i] in keys:
ind = np.where(np.array(keys)==keys_first[i])[0][0]
if ind!=i:
del keys[ind]
keys.insert(i,keys_first[i])
for key in keys:
if "precision" in key or "f1" in key or "recall" in key or "LearningRate" in key:
if not key in OtherMetrics_keys: #if this key is missing in self.fittingpopups_ui[listindex].RealTime_OtherMetrics attach it!
self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key] = []
self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key].append(dic[key])
dic_text = [("{} {}".format(item, np.round(amount[0],4))) for item, amount in dic.items()]
text = "Epoch "+str(self.fittingpopups_ui[listindex].epoch_counter)+"\n"+" ".join(dic_text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
self.fittingpopups_ui[listindex].epoch_counter+=1
if self.fittingpopups_ui[listindex].epoch_counter==1:
#for each key, put a checkbox on the tableWidget_HistoryInfo_pop
rowPosition = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.rowCount()
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.insertRow(rowPosition)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.setColumnCount(len(keys))
for columnPosition in range(len(keys)):#(2,4):
key = keys[columnPosition]
#for each item, also create 2 checkboxes (train/valid)
item = QtWidgets.QTableWidgetItem(str(key))#("item {0} {1}".format(rowNumber, columnNumber))
item.setBackground(QtGui.QColor(self.colorsQt[columnPosition]))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.setItem(rowPosition, columnPosition, item)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.resizeColumnsToContents()
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.resizeRowsToContents()
########################Real-time plotting#########################
if self.fittingpopups_ui[listindex].checkBox_realTimePlotting_pop.isChecked():
#get the range for the real time fitting
if hasattr(self.fittingpopups_ui[listindex], 'historyscatters'):#if update plot was hit before
x = range(len(self.fittingpopups_ui[listindex].Histories))
realTimeEpochs = self.fittingpopups_ui[listindex].spinBox_realTimeEpochs.value()
if len(x)>realTimeEpochs:
x = x[-realTimeEpochs:]
#is any metric checked on the table?
colcount = int(self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.columnCount())
#Collect items that are checked
selected_items,Colors = [],[]
for colposition in range(colcount):
#is it checked?
cb = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(0, colposition)
if not cb==None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
for i in range(len(self.fittingpopups_ui[listindex].historyscatters)): #iterate over all available plots
key = list(self.fittingpopups_ui[listindex].historyscatters.keys())[i]
if key in selected_items:
if key=="acc":
y = np.array(self.fittingpopups_ui[listindex].RealTime_Acc).astype(float)
elif key=="val_acc":
y = np.array(self.fittingpopups_ui[listindex].RealTime_ValAcc).astype(float)
elif key=="loss":
y = np.array(self.fittingpopups_ui[listindex].RealTime_Loss).astype(float)
elif key=="val_loss":
y = np.array(self.fittingpopups_ui[listindex].RealTime_ValLoss).astype(float)
elif "precision" in key or "f1" in key or "recall" in key or "LearningRate" in key:
y = np.array(self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key]).astype(float).reshape(-1,)
else:
return
#Only show the last realTimeEpochs epochs
if y.shape[0]>realTimeEpochs:
y = y[-realTimeEpochs:]
if y.shape[0]==len(x):
self.fittingpopups_ui[listindex].historyscatters[key].setData(x, y)#,pen=None,symbol='o',symbolPen=None,symbolBrush=brush,clear=False)
else:
print("x and y are not the same size! Omitted plotting. I will try again to plot after the next epoch.")
pg.QtGui.QApplication.processEvents()
self.fittingpopups_ui[listindex].epoch_counter = 0
#self.fittingpopups_ui[listindex].backup = [] #backup of the meta information -> in case the original folder is not accessible anymore
worker.signals.history.connect(real_time_info)
#Finally start the worker!
self.threadpool.start(worker)
self.fittingpopups[listindex].show()
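#Learning-rate finder: same pre-flight checks as action_fit_model, but only single models are
#supported and the work is delegated to action_lr_finder_worker.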
def action_lr_finder(self):
#lr_find
model_keras = self.model_keras
if type(model_keras)==tuple:
collection=True
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("LR screening is not supported for Collections of models. Please select single model")
msg.setWindowTitle("LR screening not supported for Collections!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
else:
collection=False
#Check if there was a model initialized:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a path/filename for the model to be fitted!")
msg.setWindowTitle("Model path/ filename missing!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if model_keras==None:#in case the model got deleted in another task
self.action_initialize_model(duties="initialize_train")
print("Had to re-run action_initialize_model!")
model_keras = self.model_keras
self.model_keras = None#delete this copy
if model_keras==None:
return
if not model_keras==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model is now initialized for you, Please check Model summary window below if everything is correct and then press Fit again!")
msg.setWindowTitle("No initilized model found!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_classes = int(model_keras.output.shape.dims[1])
if nr_classes<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define at least two classes")
msg.setWindowTitle("Not enough classes")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#define a variable on self which allows the fit_model_worker to load this model and fit
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = new_modelname.split(".model")[0]+"_0.model"
#save a first version of the .model
model_keras.save(self.model_keras_path)
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, the model has to be reloaded in action_lr_finder_worker anyway
#Check that Data is on RAM
DATA_len = len(self.ram) #this returns the len of a dictionary. The dictionary is supposed to contain the training/validation data; otherwise the data is read from .rtdc data directly (SLOW unless you have ultra-good SSD)
def popup_data_to_ram(button):
yes_or_no = button.text()
if yes_or_no == "&Yes":
print("Moving data to ram")
self.actionDataToRamNow_function()
elif yes_or_no == "&No":
pass
if DATA_len==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Would you like transfer the Data to RAM now?\n(Currently the data is not in RAM and would be read from .rtdc, which slows down fitting dramatically unless you have a super-fast SSD.)")
msg.setWindowTitle("Data to RAM now?")
msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
msg.buttonClicked.connect(popup_data_to_ram)
msg.exec_()
worker = Worker(self.action_lr_finder_worker)
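# Worker appears to be the module's QRunnable-style wrapper; running the LR screening on
# self.threadpool keeps the GUI responsive while the range test is computed in the background.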
#Connect the worker's signals; for the LR screening they are simply printed to the console
worker.signals.progress.connect(print)
worker.signals.history.connect(print)
#Finally start the worker!
self.threadpool.start(worker)
def action_lr_finder_worker(self,progress_callback,history_callback):
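# Background worker for the LR screening: builds a TensorFlow session for the selected device,
# reloads the model that action_lr_finder saved to disk, collects the augmentation and expert
# settings from the GUI, assembles training/validation data, and runs the LR range test via
# aid_dl.LearningRateFinder. Results are stored on self.* and plotted by update_lrfind_plot.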
if self.radioButton_cpu.isChecked():
gpu_used = False
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
gpu_used = True
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
#Retrieve more Multi-GPU Options from Menubar:
cpu_merge = bool(self.actioncpu_merge.isEnabled())
cpu_relocation = bool(self.actioncpu_relocation.isEnabled())
cpu_weight_merge = bool(self.actioncpu_weightmerge.isEnabled())
#Create config (define which device to use)
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#get an index of the fitting popup
#listindex = self.popupcounter-1
#Get user-specified filename for the new model
model_keras_path = self.model_keras_path
if type(model_keras_path)==list:
collection = True
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("LR screening is currently not supported for Collections of models. Please use single model")
msg.setWindowTitle("LR screening not supported for Collections")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
else:
collection = False
if deviceSelected=="Multi-GPU" and cpu_weight_merge==True:
with tf.device("/cpu:0"):
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
else:
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
#Initialize a variable for the parallel model
model_keras_p = None
#Multi-GPU
if deviceSelected=="Multi-GPU":
if collection==False:
print("Adjusting the model for Multi-GPU")
model_keras_p = multi_gpu_model(model_keras, gpus=gpu_nr, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)#indicate the numbers of gpus that you have
if self.radioButton_LoadContinueModel.isChecked():#calling multi_gpu_model resets the weights. Hence, they need to be put in place again
model_keras_p.layers[-2].set_weights(model_keras.get_weights())
elif collection==True:
print("Collection & Multi-GPU is not supported yet")
return
##############Main routine of the LR screening####################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras[0].output.shape.dims[1])
#Metrics to be displayed during fitting (real-time)
model_metrics = self.get_metrics(nr_classes)
#Compile model
if deviceSelected=="Single-GPU":
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#compile with default loss/optimizer for now; expert settings follow and the model will be recompiled
elif deviceSelected=="Multi-GPU":
model_keras_p.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#compile with default loss/optimizer for now; expert settings follow and the model will be recompiled
#Collect all information about the fitting routine that was user-defined
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
trainable_original, layer_names = self.trainable_original, self.layer_names
crop = int(self.spinBox_imagecrop.value())
norm = str(self.comboBox_Normalization.currentText())
nr_epochs = int(self.spinBox_NrEpochs.value())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
if collection==False:
expert_mode = bool(self.groupBox_expertMode.isChecked())
elif collection==True:
self.groupBox_expertMode.setChecked(False)
print("Expert mode was switched off. It is not implemented yet for collections.")
expert_mode = False
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.optimizer_settings.copy()
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no square brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
lossW_expert = str(self.lineEdit_lossW.text())
#To get the class weights (loss), the SelectedFiles are required
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Check if xtra_data should be used for training
xtra_in = [s["xtra_in"] for s in SelectedFiles]
if len(set(xtra_in))==1:
xtra_in = list(set(xtra_in))[0]
elif len(set(xtra_in))>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
#Get the class weights. This function runs now the first time in the fitting routine.
#It is possible that the user chose Custom weights and then changed the classes. Hence first check if
#there is a weight for each class available.
class_weight = self.get_class_weight(SelectedFiles,lossW_expert,custom_check_classes=True)
if type(class_weight)==list:
#There has been a mismatch between the classes described in class_weight and the classes available in SelectedFiles!
lossW_expert = class_weight[0] #overwrite
class_weight = class_weight[1]
print(class_weight)
print("There has been a mismatch between the classes described in \
Loss weights and the classes available in the selected files! \
Hence, the Loss weights are set to Balanced")
###############################Expert Mode values##################
if expert_mode==True:
#Some settings only need to be changed once, after user clicked apply at next epoch
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
print(text1+summary)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are the dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
print(text1+summary)
if dropout_expert_on==True:
#The user apparently wants to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a single float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
print(text)
else:
text = "Could not understand user input at Expert->Dropout"
print(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model are not equal to the rates requested by the user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection == False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection == False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
if collection == True:
for m in model_keras:
K.set_value(m.optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
recompile = False
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
if collection==True:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if collection==True:
if model_keras[0].loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True:
print("Recompiling...")
if collection==False:
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras[1]:
aid_dl.model_compile(m, loss_expert, optimizer_settings, learning_rate_const,model_metrics, nr_classes)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to adjust learning rate, loss, optimizer")
print(text_updates)
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_epoch_train = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_train]
rtdc_path_train = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_train]
zoom_factors_train = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_train]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_train = [selectedfile["shuffle"] for selectedfile in SelectedFiles_train]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_train])
if len(xtra_in)>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
if verbose==1:
print("Length of DATA (in RAM) = "+str(len(self.ram)))
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles_train)):
if len(self.ram)==0: #Here, the entire training set needs to be used! Not only random images!
#Replace=true: means individual cells could occur several times
gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen_train = aid_img.gen_crop_img_ram(self.ram,rtdc_path_train[i],random_images=False) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen_train)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "<html><head/><body><p>The standard deviation of your training data is zero! This would lead to division by zero. To avoid this, I will divide by 0.0001 instead.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Std. is zero")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
######################Load the Validation Data################################
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
nr_events_epoch_valid = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_valid]
rtdc_path_valid = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_valid]
zoom_factors_valid = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_valid]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_valid = [selectedfile["shuffle"] for selectedfile in SelectedFiles_valid]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_valid])
if len(xtra_in)>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
############Cropping#####################
percDataV = float(self.popup_lrfinder_ui.doubleSpinBox_percDataV.value())
percDataV = percDataV/100.0
X_valid,y_valid,Indices,xtra_valid = [],[],[],[]
for i in range(len(SelectedFiles_valid)):
if len(self.ram)==0:#if there is no data available on ram
#replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],int(np.rint(percDataV*nr_events_epoch_valid[i])),random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:#get a similar generator, using the ram-data
gen_valid = aid_img.gen_crop_img_ram(self.ram,rtdc_path_valid[i],int(np.rint(percDataV*nr_events_epoch_valid[i])),random_images=shuffle_valid[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
generator_cropped_out = next(gen_valid)
X_valid.append(generator_cropped_out[0])
#y_valid.append(np.repeat(indices_valid[i],nr_events_epoch_valid[i]))
y_valid.append(np.repeat(indices_valid[i],X_valid[-1].shape[0]))
Indices.append(generator_cropped_out[1])
xtra_valid.append(generator_cropped_out[2])
del generator_cropped_out
X_valid = np.concatenate(X_valid)
y_valid = np.concatenate(y_valid)
Y_valid = np_utils.to_categorical(y_valid, nr_classes)# * 2 - 1
xtra_valid = np.concatenate(xtra_valid)
if len(X_valid.shape)==4:
channels=3
elif len(X_valid.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_valid.shape))
if channels==1:
#Add the "channels" dimension
X_valid = np.expand_dims(X_valid,3)
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(X_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(X_valid,norm)
#Validation data can be cropped to final size already since no augmentation
#will happen on this data set
dim_val = X_valid.shape
print("Current dim. of validation set (pixels x pixels) = "+str(dim_val[2]))
if dim_val[2]!=crop:
print("Change dim. (pixels x pixels) of validation set to = "+str(crop))
remove = int(dim_val[2]/2.0 - crop/2.0)
X_valid = X_valid[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
if xtra_in==True:
print("Add Xtra Data to X_valid")
X_valid = [X_valid,xtra_valid]
###################Load training data####################
#####################and perform#########################
##################Image augmentation#####################
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
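# Worked example: for crop=64, sqrt(64**2+64**2) is about 90.5, so cropsize2 = ceil(90.5/2)*2 = 92.
# The extra margin is removed again by the final cropping after the affine augmentation below.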
#Should only a certain percentage of the numbers given in the table be sampled?
percDataT = float(self.popup_lrfinder_ui.doubleSpinBox_percDataT.value())
percDataT = percDataT/100.0
X_train,y_train,xtra_train = [],[],[]
t3 = time.time()
for i in range(len(SelectedFiles_train)):
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen_train = aid_img.gen_crop_img(cropsize2,rtdc_path_train[i],int(np.rint(percDataT*nr_events_epoch_train[i])),random_images=shuffle_train[i],replace=True,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:
gen_train = aid_img.gen_crop_img_ram(self.ram,rtdc_path_train[i],int(np.rint(percDataT*nr_events_epoch_train[i])),random_images=shuffle_train[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
data_ = next(gen_train)
X_train.append(data_[0])
y_train.append(np.repeat(indices_train[i],X_train[-1].shape[0]))
if xtra_in==True:
xtra_train.append(data_[2])
del data_
X_train = np.concatenate(X_train)
X_train = X_train.astype(np.uint8)
y_train = np.concatenate(y_train)
if xtra_in==True:
print("Retrieve Xtra Data...")
xtra_train = np.concatenate(xtra_train)
t4 = time.time()
if verbose == 1:
print("Time to load data (from .rtdc or RAM) and crop="+str(t4-t3))
if len(X_train.shape)==4:
channels=3
elif len(X_train.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_train.shape))
if channels==1:
#Add the "channels" dimension
X_train = np.expand_dims(X_train,3)
t3 = time.time()
#Affine augmentation
X_train = aid_img.affine_augm(X_train,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear) #Affine image augmentation
y_train = np.copy(y_train)
Y_train = np_utils.to_categorical(y_train, nr_classes)# * 2 - 1
t4 = time.time()
if verbose == 1:
print("Time to perform affine augmentation ="+str(t4-t3))
t3 = time.time()
#Now do the final cropping to the actual size that was set by user
dim = X_train.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
X_train = X_train[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
t4 = time.time()
#X_train = np.copy(X_train) #save into new array and do some iterations with varying noise/brightness
#reuse this X_batch_orig a few times since this augmentation was costly
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#In each iteration, start with non-augmented data
#X_batch = np.copy(X_batch_orig)#copy from X_batch_orig, X_batch will be altered without altering X_batch_orig
#X_train = X_train.astype(np.uint8)
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
X_train = X_train.astype(np.uint8)
if contrast_on:
t_con_aug_1 = time.time()
X_train = aid_img.contrast_augm_cv2(X_train,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
t_con_aug_2 = time.time()
if verbose == 1:
print("Time to augment contrast="+str(t_con_aug_2-t_con_aug_1))
if saturation_on or hue_on:
t_sat_aug_1 = time.time()
X_train = aid_img.satur_hue_augm_cv2(X_train.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta) #Gray and RGB; both values >0!
t_sat_aug_2 = time.time()
if verbose == 1:
print("Time to augment saturation/hue="+str(t_sat_aug_2-t_sat_aug_1))
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
t_avgBlur_1 = time.time()
X_train = aid_img.avg_blur_cv2(X_train,avgBlur_min,avgBlur_max)
t_avgBlur_2 = time.time()
if verbose == 1:
print("Time to perform average blurring="+str(t_avgBlur_2-t_avgBlur_1))
if gaussBlur_on:
t_gaussBlur_1 = time.time()
X_train = aid_img.gauss_blur_cv(X_train,gaussBlur_min,gaussBlur_max)
t_gaussBlur_2 = time.time()
if verbose == 1:
print("Time to perform gaussian blurring="+str(t_gaussBlur_2-t_gaussBlur_1))
if motionBlur_on:
t_motionBlur_1 = time.time()
X_train = aid_img.motion_blur_cv(X_train,motionBlur_kernel,motionBlur_angle)
t_motionBlur_2 = time.time()
if verbose == 1:
print("Time to perform motion blurring="+str(t_motionBlur_2-t_motionBlur_1))
##########Brightness noise#########
t3 = time.time()
X_train = aid_img.brightn_noise_augm_cv2(X_train,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
t4 = time.time()
if verbose == 1:
print("Time to augment brightness="+str(t4-t3))
t3 = time.time()
if norm == "StdScaling using mean and std of all training data":
X_train = aid_img.image_normalization(X_train,norm,mean_trainingdata,std_trainingdata)
else:
X_train = aid_img.image_normalization(X_train,norm)
t4 = time.time()
if verbose == 1:
print("Time to apply normalization="+str(t4-t3))
if verbose == 1:
print("X_train.shape")
print(X_train.shape)
if xtra_in==True:
print("Add Xtra Data to X_train")
X_train = [X_train,xtra_train]
###################################################
###############Actual fitting######################
###################################################
batch_size = int(self.popup_lrfinder_ui.spinBox_batchSize.value())
stepsPerEpoch = int(self.popup_lrfinder_ui.spinBox_stepsPerEpoch.value())
epochs = int(self.popup_lrfinder_ui.spinBox_epochs.value())
start_lr = float(self.popup_lrfinder_ui.lineEdit_startLr.text())
stop_lr = float(self.popup_lrfinder_ui.lineEdit_stopLr.text())
valMetrics = bool(self.popup_lrfinder_ui.checkBox_valMetrics.isChecked())
####################lr_find algorithm####################
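# LR range test (presumably following the approach popularized by L. Smith / fast.ai): the
# learning rate is increased batch by batch from start_lr to stop_lr while loss/accuracy are
# recorded, so a suitable LR (or LR range) can be read off the resulting curve. The exact
# schedule is implemented in aid_dl.LearningRateFinder.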
if model_keras_p == None:
lrf = aid_dl.LearningRateFinder(model_keras)
elif model_keras_p != None:
lrf = aid_dl.LearningRateFinder(model_keras_p)
if valMetrics==True:
lrf.find([X_train,Y_train],[X_valid,Y_valid],start_lr,stop_lr,stepsPerEpoch=stepsPerEpoch,batchSize=batch_size,epochs=epochs)
else:
lrf.find([X_train,Y_train],None,start_lr,stop_lr,stepsPerEpoch=stepsPerEpoch,batchSize=batch_size,epochs=epochs)
skipBegin,skipEnd = 10,1
self.learning_rates = lrf.lrs[skipBegin:-skipEnd]
self.losses_or = lrf.losses_or[skipBegin:-skipEnd]
self.losses_sm = lrf.losses_sm[skipBegin:-skipEnd]
self.accs_or = lrf.accs_or[skipBegin:-skipEnd]
self.accs_sm = lrf.accs_sm[skipBegin:-skipEnd]
self.val_losses_sm = lrf.val_losses_sm[skipBegin:-skipEnd]
self.val_losses_or = lrf.val_losses_or[skipBegin:-skipEnd]
self.val_accs_sm = lrf.val_accs_sm[skipBegin:-skipEnd]
self.val_accs_or = lrf.val_accs_or[skipBegin:-skipEnd]
# Enable the groupboxes
self.popup_lrfinder_ui.groupBox_singleLr.setEnabled(True)
self.popup_lrfinder_ui.groupBox_LrRange.setEnabled(True)
self.update_lrfind_plot()
def update_lrfind_plot(self):
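# Redraws the LR-finder plot: the metric chosen in the popup (loss/accuracy, optionally the
# validation variant, smoothed or raw, or its first derivative) is plotted against
# log10(learning rate). First derivatives have one point less than the LR array, hence the
# length checks further below.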
if not hasattr(self, 'learning_rates'):
return
metric = str(self.popup_lrfinder_ui.comboBox_metric.currentText())
color = self.popup_lrfinder_ui.pushButton_color.palette().button().color()
width = int(self.popup_lrfinder_ui.spinBox_lineWidth.value())
color = list(color.getRgb())
color = tuple(color)
pencolor = pg.mkPen(color, width=width)
smooth = bool(self.popup_lrfinder_ui.checkBox_smooth.isChecked())
try:# try to empty the plot
self.popup_lrfinder_ui.lr_plot.clear()
#self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_line)
except:
pass
if metric=="Loss" and smooth==True:
self.y_values = self.losses_sm
elif metric=="Loss" and smooth==False:
self.y_values = self.losses_or
elif metric=="Loss 1st derivative" and smooth==True:
self.y_values = np.diff(self.losses_sm,n=1)
elif metric=="Loss 1st derivative" and smooth==False:
self.y_values = np.diff(self.losses_or,n=1)
elif metric=="Accuracy" and smooth==True:
self.y_values = self.accs_sm
elif metric=="Accuracy" and smooth==False:
self.y_values = self.accs_or
elif metric=="Accuracy 1st derivative" and smooth==True:
self.y_values = np.diff(self.accs_sm,n=1)
elif metric=="Accuracy 1st derivative" and smooth==False:
self.y_values = np.diff(self.accs_or,n=1)
elif metric=="Val. loss" and smooth==True:
self.y_values = self.val_losses_sm
elif metric=="Val. loss" and smooth==False:
self.y_values = self.val_losses_or
elif metric=="Val. loss 1st derivative" and smooth==True:
self.y_values = np.diff(self.val_losses_sm,n=1)
elif metric=="Val. loss 1st derivative" and smooth==False:
self.y_values = np.diff(self.val_losses_or,n=1)
elif metric=="Val. accuracy" and smooth==True:
self.y_values = self.val_accs_sm
elif metric=="Val. accuracy" and smooth==False:
self.y_values = self.val_accs_or
elif metric=="Val. accuracy 1st derivative" and smooth==True:
self.y_values = np.diff(self.val_accs_sm,n=1)
elif metric=="Val. accuracy 1st derivative" and smooth==False:
self.y_values = np.diff(self.val_accs_or,n=1)
else:
print("The combination of "+str(metric)+" and smooth="+str(smooth)+" is not supported!")
if len(self.learning_rates)==len(self.y_values):
self.lr_line = pg.PlotCurveItem(x=np.log10(self.learning_rates), y=self.y_values,pen=pencolor,name=metric)
elif len(self.learning_rates)-1==len(self.y_values):
self.lr_line = pg.PlotCurveItem(x=np.log10(self.learning_rates)[1:], y=self.y_values,pen=pencolor,name=metric)
else:
print("No data available. Probably, validation metrics were not computed. Please click Run again.")
return
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_line)
#In case the groupBox_singleLr is already checked, carry out the function:
if self.popup_lrfinder_ui.groupBox_singleLr.isChecked():
self.get_lr_single(on_or_off=True)
#In case the groupBox_LrRange is already checked, carry out the function:
if self.popup_lrfinder_ui.groupBox_LrRange.isChecked():
self.get_lr_range(on_or_off=True)
def get_lr_single(self,on_or_off):
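# Adds a draggable vertical line (pg.InfiniteLine) at the learning rate where the currently
# plotted metric is minimal; dragging it writes the corresponding LR (10**position, since the
# x-axis is log10) into lineEdit_singleLr. Unchecking the groupbox removes the line again.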
if on_or_off==True: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
ind = np.argmin(self.y_values)#find location of loss-minimum
mini_x = self.learning_rates[ind]
mini_x = np.log10(mini_x)
pen = pg.mkPen(color="w")
self.lr_single = pg.InfiniteLine(pos=mini_x, angle=90, pen=pen, movable=True)
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_single)
def position_changed():
#where did the user drag the vertical line to?
new_position = 10**(self.lr_single.value())
self.popup_lrfinder_ui.lineEdit_singleLr.setText(str(new_position))
self.lr_single.sigPositionChangeFinished.connect(position_changed)
if on_or_off==False: #user unchecked the groupbox->remove the InfiniteLine if possible
try:
self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_single)
except:
pass
def get_lr_range(self,on_or_off):
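# Adds a draggable region (pg.LinearRegionItem) spanning from 1e-5 up to the LR at the metric
# minimum; its edges are converted back from log10 and written into lineEdit_LrMin/lineEdit_LrMax.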
#print(on_or_off)
#start_lr = float(self.popup_lrfinder_ui.lineEdit_startLr.text())
#stop_lr = float(self.popup_lrfinder_ui.lineEdit_stopLr.text())
if on_or_off==True: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
start_x = 0.00001
start_x = np.log10(start_x)
ind = np.argmin(self.y_values)#find location of loss-minimum
end_x = self.learning_rates[ind]
end_x = np.log10(end_x)
self.lr_region = pg.LinearRegionItem([start_x, end_x], movable=True)
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_region)
def region_changed():
#where did the user drag the region to?
new_region = self.lr_region.getRegion()
new_region_left = 10**(new_region[0])
new_region_right = 10**(new_region[1])
self.popup_lrfinder_ui.lineEdit_LrMin.setText(str(new_region_left))
self.popup_lrfinder_ui.lineEdit_LrMax.setText(str(new_region_right))
self.lr_region.sigRegionChangeFinished.connect(region_changed)
if on_or_off==False: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
try:
self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_region)
except:
pass
def action_show_example_imgs(self): #this function is only for the main window
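# Shows five example images of the requested class in the main window, either as original
# (cropped and normalized only) or after the full augmentation pipeline, using the same
# settings and generators as the fitting routine. pyqtgraph ImageView is used so the images
# can be exported.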
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#Get state of the comboboxes!
tr_or_valid = str(self.comboBox_ShowTrainOrValid.currentText())
w_or_wo_augm = str(self.comboBox_ShowWOrWoAug.currentText())
#most of it should be similar to action_fit_model_worker
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Collect all information about the fitting routine that was user defined
crop = int(self.spinBox_imagecrop.value())
norm = str(self.comboBox_Normalization.currentText())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
#which index is requested by user:?
req_index = int(self.spinBox_ShowIndex.value())
if tr_or_valid=='Training':
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
elif tr_or_valid=='Validation':
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles = np.array(SelectedFiles)[ind]
SelectedFiles = list(SelectedFiles)
indices = [selectedfile["class"] for selectedfile in SelectedFiles]
ind = np.where(np.array(indices)==req_index)[0]
if len(ind)<1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no data for this class available")
msg.setWindowTitle("Class not available")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
indices = list(np.array(indices)[ind])
SelectedFiles = list(np.array(SelectedFiles)[ind])
nr_events_epoch = len(indices)*[10] #[selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles]
rtdc_path = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles]
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle = [selectedfile["shuffle"] for selectedfile in SelectedFiles]
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],random_images=False) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
print("std_trainingdata was zero and is now set to 0.0001 to avoid div. by zero!")
if self.actionVerbose.isChecked():
print("Used all training data to get mean and std for normalization")
if w_or_wo_augm=='With Augmentation':
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
############Cropping and image augmentation#####################
#Start the first iteration:
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=True,replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
try: #When all cells are at the border of the image, the generator will be empty. Avoid program crash by try, except
X.append(next(gen)[0])
except StopIteration:
print("All events at border of image and discarded")
return
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
X = X.astype(np.uint8) #make sure we stay in uint8
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
X = np.expand_dims(X,3)#Add the "channels" dimension
else:
print("Invalid data dimension:" +str(X.shape))
X_batch, y_batch = aid_img.affine_augm(X,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear), y #Affine image augmentation
X_batch = X_batch.astype(np.uint8) #make sure we stay in uint8
#Now do the final cropping to the actual size that was set by user
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
#X_batch = X_batch[:,:,remove:-remove,remove:-remove] #crop to crop x crop pixels #Theano
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
if contrast_on:
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
if saturation_on or hue_on:
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta)
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
if gaussBlur_on:
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
if motionBlur_on:
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
X = X_batch
if verbose: print("Shape of the shown images is:"+str(X.shape))
elif w_or_wo_augm=='Original image':
############Cropping#####################
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=True,replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
try:
X.append(next(gen)[0])
except:
return
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
X = np.expand_dims(X,3) #Add the "channels" dimension
else:
print("Invalid data dimension: " +str(X.shape))
if norm == "StdScaling using mean and std of all training data":
X = aid_img.image_normalization(X,norm,mean_trainingdata,std_trainingdata)
else:
X = aid_img.image_normalization(X,norm)
if verbose: print("Shape of the shown images is: "+str(X.shape))
#Is there already anything shown on the widget?
children = self.widget_ViewImages.findChildren(QtWidgets.QGridLayout)
if len(children)>0: #if there is something, delete it!
for i in reversed(range(self.gridLayout_ViewImages.count())):
widgetToRemove = self.gridLayout_ViewImages.itemAt(i).widget()
widgetToRemove.setParent(None)
widgetToRemove.deleteLater()
else: #else, create a Gridlayout to put the images
self.gridLayout_ViewImages = QtWidgets.QGridLayout(self.widget_ViewImages)
for i in range(5):
if channels==1:
img = X[i,:,:,0] #TensorFlow
if channels==3:
img = X[i,:,:,:] #TensorFlow
#Stretch pixel value to full 8bit range (0-255); only for display
img = img-np.min(img)
fac = np.max(img)
img = (img/fac)*255.0
img = img.astype(np.uint8)
if channels==1:
height, width = img.shape
if channels==3:
height, width, _ = img.shape
# qi=QtGui.QImage(img_zoom.data, width, height,width, QtGui.QImage.Format_Indexed8)
# self.label_image_show = QtWidgets.QLabel(self.widget_ViewImages)
# self.label_image_show.setPixmap(QtGui.QPixmap.fromImage(qi))
# self.gridLayout_ViewImages.addWidget(self.label_image_show, 1,i)
# self.label_image_show.show()
#Use pyqtgraph instead, in order to allow for exporting images
self.image_show = pg.ImageView(self.widget_ViewImages)
self.image_show.show()
if verbose: print("Shape of zoomed image: "+str(img.shape))
if channels==1:
self.image_show.setImage(img.T,autoRange=False)
if channels==3:
self.image_show.setImage(np.swapaxes(img,0,1),autoRange=False)
self.image_show.ui.histogram.hide()
self.image_show.ui.roiBtn.hide()
self.image_show.ui.menuBtn.hide()
self.gridLayout_ViewImages.addWidget(self.image_show, 1,i)
self.widget_ViewImages.show()
def tableWidget_HistoryInfo_pop_dclick(self,item,listindex):
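# Double-clicking a metric cell in the history table of a fitting popup opens a color dialog
# (except for the "Show saved only" cell); the chosen color becomes the cell background and
# thereby the plot color of that metric. Pure black is rejected.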
if item is not None:
tableitem = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(item.row(), item.column())
if str(tableitem.text())!="Show saved only":
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
#self.update_historyplot_pop(listindex)
def action_show_example_imgs_pop(self,listindex): #this function is only for the fitting popup windows
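# Popup-window counterpart of action_show_example_imgs: identical logic, but all settings are
# read from the widgets of the fitting popup identified by listindex.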
#Get state of the comboboxes!
tr_or_valid = str(self.fittingpopups_ui[listindex].comboBox_ShowTrainOrValid_pop.currentText())
w_or_wo_augm = str(self.fittingpopups_ui[listindex].comboBox_ShowWOrWoAug_pop.currentText())
#most of it should be similar to action_fit_model_worker
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Collect all information about the fitting routine that was user defined
crop = int(self.fittingpopups_ui[listindex].spinBox_imagecrop_pop.value())
norm = str(self.fittingpopups_ui[listindex].comboBox_Normalization_pop.currentText())
h_flip = bool(self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.isChecked())
v_flip = bool(self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.isChecked())
rotation = float(self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.text())
width_shift = float(self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.text())
height_shift = float(self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.text())
zoom = float(self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.text())
shear = float(self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.text())
brightness_add_lower = float(self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.value())
brightness_add_upper = float(self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.value())
brightness_mult_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.value())
brightness_mult_upper = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.value())
gaussnoise_mean = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.value())
gaussnoise_scale = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.value())
contrast_on = bool(self.fittingpopups_ui[listindex].checkBox_contrast_pop.isChecked())
contrast_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.value())
contrast_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.value())
saturation_on = bool(self.fittingpopups_ui[listindex].checkBox_saturation_pop.isChecked())
saturation_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.value())
saturation_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.value())
hue_on = bool(self.fittingpopups_ui[listindex].checkBox_hue_pop.isChecked())
hue_delta = float(self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.value())
avgBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.isChecked())
avgBlur_min = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.value())
avgBlur_max = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.value())
gaussBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.isChecked())
gaussBlur_min = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.value())
gaussBlur_max = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.value())
motionBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.isChecked())
motionBlur_kernel = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.text())
motionBlur_angle = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
paddingMode = str(self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.currentText()).lower()
#which index is requested by user:?
req_index = int(self.fittingpopups_ui[listindex].spinBox_ShowIndex_pop.value())
if tr_or_valid=='Training':
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
elif tr_or_valid=='Validation':
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles = np.array(SelectedFiles)[ind]
SelectedFiles = list(SelectedFiles)
indices = [selectedfile["class"] for selectedfile in SelectedFiles]
ind = np.where(np.array(indices)==req_index)[0]
if len(ind)<1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no data for this class available")
msg.setWindowTitle("Class not available")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
indices = list(np.array(indices)[ind])
SelectedFiles = list(np.array(SelectedFiles)[ind])
nr_events_epoch = len(indices)*[10] #[selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles]
rtdc_path = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles]
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle = [selectedfile["shuffle"] for selectedfile in SelectedFiles]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles])
if len(xtra_in)>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
else:
if len(self.ram)==0:
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],random_images=False,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
print("std_trainingdata turned out to be zero. I set it to 0.0001, to avoid division by zero!")
if self.actionVerbose.isChecked():
print("Used all training data to get mean and std for normalization")
if w_or_wo_augm=='With Augmentation':
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
############Get cropped images with image augmentation#####################
#Start the first iteration:
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=shuffle[i],replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
X.append(next(gen)[0])
#y.append(np.repeat(indices[i],nr_events_epoch[i]))
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X.shape))
if channels==1:
#Add the "channels" dimension
X = np.expand_dims(X,3)
X_batch, y_batch = aid_img.affine_augm(X,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear), y #Affine image augmentation
X_batch = X_batch.astype(np.uint8) #make sure we stay in uint8
#Now do the final cropping to the actual size that was set by user
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
#X_batch = X_batch[:,:,remove:-remove,remove:-remove] #crop to crop x crop pixels #Theano
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
if contrast_on:
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
if saturation_on or hue_on:
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta)
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
if gaussBlur_on:
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
if motionBlur_on:
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
X = X_batch
elif w_or_wo_augm=='Original image':
############Cropping#####################
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=shuffle[i],replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
X.append(next(gen)[0])
#y.append(np.repeat(indices[i],nr_events_epoch[i]))
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels = 3
elif len(X.shape)==3:
channels = 1
X = np.expand_dims(X,3)#Add the "channels" dimension
else:
print("Invalid data dimension:" +str(X.shape))
if norm == "StdScaling using mean and std of all training data":
X = aid_img.image_normalization(X,norm,mean_trainingdata,std_trainingdata)
else:
X = aid_img.image_normalization(X,norm)
#Is there already anything shown on the widget?
children = self.fittingpopups_ui[listindex].widget_ViewImages_pop.findChildren(QtWidgets.QGridLayout)
if len(children)>0: #if there is something, delete it!
for i in reversed(range(self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.count())):
widgetToRemove = self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.itemAt(i).widget()
widgetToRemove.setParent(None)
widgetToRemove.deleteLater()
else: #else, create a Gridlayout to put the images
self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop = QtWidgets.QGridLayout(self.fittingpopups_ui[listindex].widget_ViewImages_pop)
for i in range(5):
if channels==1:
img = X[i,:,:,0]
if channels==3:
img = X[i,:,:,:]
#Normalize image to full 8bit range (from 0 to 255)
img = img-np.min(img)
fac = np.max(img)
img = (img/fac)*255.0
img = img.astype(np.uint8)
# height, width = img_zoom.shape
# qi=QtGui.QImage(img_zoom.data, width, height,width, QtGui.QImage.Format_Indexed8)
# self.label_image_show = QtWidgets.QLabel(self.widget_ViewImages)
# self.label_image_show.setPixmap(QtGui.QPixmap.fromImage(qi))
# self.gridLayout_ViewImages_pop.addWidget(self.label_image_show, 1,i)
# self.label_image_show.show()
#Use pyqtgraph instead, in order to allow for exporting images
self.fittingpopups_ui[listindex].image_show_pop = pg.ImageView(self.fittingpopups_ui[listindex].widget_ViewImages_pop)
self.fittingpopups_ui[listindex].image_show_pop.show()
if channels==1:
self.fittingpopups_ui[listindex].image_show_pop.setImage(img.T,autoRange=False)
if channels==3:
self.fittingpopups_ui[listindex].image_show_pop.setImage(np.swapaxes(img,0,1),autoRange=False)
self.fittingpopups_ui[listindex].image_show_pop.ui.histogram.hide()
self.fittingpopups_ui[listindex].image_show_pop.ui.roiBtn.hide()
self.fittingpopups_ui[listindex].image_show_pop.ui.menuBtn.hide()
self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.addWidget(self.fittingpopups_ui[listindex].image_show_pop, 1,i)
self.fittingpopups_ui[listindex].widget_ViewImages_pop.show()
def get_color_mode(self):
if str(self.comboBox_GrayOrRGB.currentText())=="Grayscale":
return "Grayscale"
elif str(self.comboBox_GrayOrRGB.currentText())=="RGB":
return "RGB"
else:
return None
def checkBox_rollingMedian_statechange(self,item):#used in frontend
self.horizontalSlider_rollmedi.setEnabled(item)
def update_historyplot(self):
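#Plot the quantities (e.g. acc, loss, val_acc, ...) that were checked in tableWidget_HistoryItems
#versus the epoch number. Optionally restrict to saved models, overlay rolling medians and
#provide a draggable region for a linear fit of the last epochs.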
#After loading a history, there are checkboxes available. Check, if user checked some:
colcount = self.tableWidget_HistoryItems.columnCount()
#Collect items that are checked
selected_items = []
Colors = []
for colposition in range(colcount):
#get checkbox item and; is it checked?
cb = self.tableWidget_HistoryItems.item(0, colposition)
if cb is not None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
#Get a list of the color from the background of the table items
DF1 = self.loaded_history
#Clear the plot
self.widget_Scatterplot.clear()
#Add plot
self.plt1 = self.widget_Scatterplot.addPlot()
self.plt1.showGrid(x=True,y=True)
self.plt1.addLegend()
self.plt1.setLabel('bottom', 'Epoch', units='')
self.plot_rollmedis = [] #list for plots of rolling medians
if "Show saved only" in selected_items:
#nr_of_selected_items = len(selected_items)-1
#get the "Saved" column from DF1
saved = DF1["Saved"]
saved = np.where(np.array(saved==1))[0]
# else:
# nr_of_selected_items = len(selected_items)
self.Colors = Colors
scatter_x,scatter_y = [],[]
for i in range(len(selected_items)):
key = selected_items[i]
if key!="Show saved only":
df = DF1[key]
epochs = range(len(df))
win = int(self.horizontalSlider_rollmedi.value())
rollmedi = df.rolling(window=win).median()
if "Show saved only" in selected_items:
df = np.array(df)[saved]
epochs = np.array(epochs)[saved]
rollmedi = pd.DataFrame(df).rolling(window=win).median()
scatter_x.append(epochs)
scatter_y.append(df)
color = self.Colors[i]
pen_rollmedi = list(color.color().getRgb())
pen_rollmedi = pg.mkColor(pen_rollmedi)
pen_rollmedi = pg.mkPen(color=pen_rollmedi,width=6)
color = list(color.color().getRgb())
color[-1] = int(0.6*color[-1])
color = tuple(color)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
self.plt1.plot(epochs, df,pen=None,symbol='o',symbolPen=None,symbolBrush=brush,name=key,clear=False)
if bool(self.checkBox_rollingMedian.isChecked()):#Should a rolling median be plotted?
try:
rollmedi = np.array(rollmedi).reshape(rollmedi.shape[0])
rm = self.plt1.plot(np.array(epochs), rollmedi,pen=pen_rollmedi,clear=False)
self.plot_rollmedis.append(rm)
except Exception as e:
#There is an issue for the rolling median plotting!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(str(e)+"\n->There are likely too few points to have a rolling median with such a window size ("+str(round(win))+")")
msg.setWindowTitle("Error occured when plotting rolling median:")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
if len(str(self.lineEdit_LoadHistory.text()))==0:
#if DF1==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please load History file first (.meta)")
msg.setWindowTitle("No History file loaded")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if len(scatter_x)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please select at least one of " +"\n".join(list(DF1.keys())))
msg.setWindowTitle("No quantity selected")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Keep the information as lists available for this function
self.scatter_x_l, self.scatter_y_l = scatter_x,scatter_y
if bool(self.checkBox_linearFit.isChecked()):
#Put a linear region on the plot; cover the last 10% of points
if np.max(np.concatenate(scatter_x))<12:
start_x = 0
end_x = np.max(np.concatenate(scatter_x))+1
else:
start_x = int(0.9*np.max(np.concatenate(scatter_x)))
end_x = int(1.0*np.max(np.concatenate(scatter_x)))
self.region_linfit = pg.LinearRegionItem([start_x, end_x], bounds=[-np.inf,np.inf], movable=True)
self.plt1.addItem(self.region_linfit)
def region_changed():
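#Called whenever the user drags region_linfit: fit a straight line to each selected
#quantity using only the epochs inside the region and show the fit equation in the legend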
try: #clear the plot from other fits if there are any
if len(self.plot_fits)>0:
for i in range(len(self.plot_fits)):
self.plt1.legend.removeItem(self.names[i])
self.plt1.removeItem(self.plot_fits[i])
except:
pass
#where did the user drag the region_linfit to?
new_region = self.region_linfit.getRegion()
#for each curve, do a linear regression
self.plot_fits,self.names = [], []
for i in range(len(self.scatter_x_l)):
scatter_x_vals = np.array(self.scatter_x_l[i])
ind = np.where( (scatter_x_vals<new_region[1]) & (scatter_x_vals>new_region[0]) )
scatter_x_vals = scatter_x_vals[ind]
scatter_y_vals = np.array(self.scatter_y_l[i])[ind]
if len(scatter_x_vals)>1:
fit = np.polyfit(scatter_x_vals,scatter_y_vals,1)
fit_y = fit[0]*scatter_x_vals+fit[1]
pencolor = pg.mkColor(self.Colors[i].color())
pen = pg.mkPen(color=pencolor,width=6)
text = 'y='+("{:.2e}".format(fit[0]))+"x + " +("{:.2e}".format(fit[1]))
self.names.append(text)
self.plot_fits.append(self.plt1.plot(name=text))
self.plot_fits[i].setData(scatter_x_vals,fit_y,pen=pen,clear=False,name=text)
self.region_linfit.sigRegionChangeFinished.connect(region_changed)
def slider_changed():
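#Called when the rolling-median slider is moved: remove the old median curves and
#re-plot them with the new window size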
if bool(self.checkBox_rollingMedian.isChecked()):
#remove other rolling median lines:
for i in range(len(self.plot_rollmedis)):
self.plt1.removeItem(self.plot_rollmedis[i])
#Start with fresh list
self.plot_rollmedis = []
win = int(self.horizontalSlider_rollmedi.value())
for i in range(len(self.scatter_x_l)):
epochs = np.array(self.scatter_x_l[i])
if type(self.scatter_y_l[i]) == pd.core.frame.DataFrame:
rollmedi = self.scatter_y_l[i].rolling(window=win).median()
else:
rollmedi = pd.DataFrame(self.scatter_y_l[i]).rolling(window=win).median()
rollmedi = np.array(rollmedi).reshape(rollmedi.shape[0])
pencolor = pg.mkColor(self.Colors[i].color())
pen_rollmedi = pg.mkPen(color=pencolor,width=6)
rm = self.plt1.plot(np.array(epochs), rollmedi,pen=pen_rollmedi,clear=False)
self.plot_rollmedis.append(rm)
self.horizontalSlider_rollmedi.sliderMoved.connect(slider_changed)
scatter_x = np.concatenate(scatter_x)
scatter_y = np.concatenate(scatter_y)
scatter_x_norm = (scatter_x.astype(float))/float(np.max(scatter_x))
scatter_y_norm = (scatter_y.astype(float))/float(np.max(scatter_y))
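#Use normalized coordinates for the click-to-point distance so that the axis with the
#larger numeric range does not dominate the nearest-point search in onClick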
self.model_was_selected_before = False
def onClick(event):
#Get all plotting items
#if len(self.plt1.listDataItems())==nr_of_selected_items+1:
#delete the last item if the user selected already one:
if self.model_was_selected_before:
self.plt1.removeItem(self.plt1.listDataItems()[-1])
items = self.widget_Scatterplot.scene().items(event.scenePos())
#get the index of the viewbox
isviewbox = [type(item)==pg.graphicsItems.ViewBox.ViewBox for item in items]
index = np.where(np.array(isviewbox)==True)[0]
vb = np.array(items)[index]
try: #when the user rescaled the view and clicks somewhere outside, an IndexError could occur.
clicked_x = float(vb[0].mapSceneToView(event.scenePos()).x())
clicked_y = float(vb[0].mapSceneToView(event.scenePos()).y())
except:
return
try:
a1 = (clicked_x)/float(np.max(scatter_x))
a2 = (clicked_y)/float(np.max(scatter_y))
except Exception as e:
print(str(e))
return
#Which is the closest scatter point?
dist = np.sqrt(( a1-scatter_x_norm )**2 + ( a2-scatter_y_norm )**2)
index = np.argmin(dist)
clicked_x = scatter_x[index]
clicked_y = scatter_y[index]
#Update the spinBox
#self.spinBox_ModelIndex.setValue(int(clicked_x))
#Modelindex for textBrowser_SelectedModelInfo
text_index = "\nModelindex: "+str(clicked_x)
#Indicate the selected model on the scatter plot
self.plt1.plot([clicked_x], [clicked_y],pen=None,symbol='o',symbolPen='w',clear=False)
#Get more information about this model
Modelname = str(self.loaded_para["Modelname"].iloc[0])
path, filename = os.path.split(Modelname)
filename = filename.split(".model")[0]+"_"+str(clicked_x)+".model"
path = os.path.join(path,filename)
if os.path.isfile(path):
text_path = "\nFile is located in:"+path
else:
text_path = "\nFile not found!:"+path+"\nProbably the .model was deleted or not saved"
text_acc = str(DF1.iloc[clicked_x])
self.textBrowser_SelectedModelInfo.setText("Loaded model: "+filename+text_index+text_path+"\nPerformance:\n"+text_acc)
self.model_was_selected_before = True
self.model_2_convert = path
self.widget_Scatterplot.scene().sigMouseClicked.connect(onClick)
def action_load_history(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open meta-data', Default_dict["Path of last model"],"AIDeveloper Meta file (*meta.xlsx)")
filename = filename[0]
if not filename.endswith("meta.xlsx"):
return
if not os.path.isfile(filename):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("File not found")
msg.setWindowTitle("File not found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.lineEdit_LoadHistory.setText(filename)
self.action_plot_history(filename)
def action_load_history_current(self):
if self.model_keras_path==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no fitting going on")
msg.setWindowTitle("No current fitting process!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
history_path = self.model_keras_path
if type(history_path)==list:#collection=True
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Not implemented for collections. Please use 'Load History' button to specify a single .meta file")
msg.setWindowTitle("Not implemented for collecitons")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
filename = history_path.split("_0.model")[0]+"_meta.xlsx"
if not filename.endswith("meta.xlsx"):
return
if not os.path.isfile(filename):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("File not found")
msg.setWindowTitle("File not found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.lineEdit_LoadHistory.setText(filename)
self.action_plot_history(filename)
def action_plot_history(self,filename):
#If there is a file, it can happen that fitting is currently going on
#and with bad luck AID just tries to write to the file. This would cause a crash.
#Therefore, first try to copy the file to a temporary folder. If that fails,
#wait 1.5 seconds and try again
#There needs to be a "temp" folder. If there is none, create it!
#does temp exist?
tries = 0 #during fitting, AID sometimes wants to write to the history file. In this case we can't read
try:
while tries<15:#try a few times
try:
temp_path = aid_bin.create_temp_folder()#create a temp folder if it does not already exist
#Create a random filename for a temp. file
someletters = list("STERNBURGPILS")
temporaryfile = np.random.choice(someletters,5,replace=True)
temporaryfile = "".join(temporaryfile)+".xlsx"
temporaryfile = os.path.join(temp_path,temporaryfile)
shutil.copyfile(filename,temporaryfile) #copy the original excel file there
dic = pd.read_excel(temporaryfile,sheet_name='History',index_col=0) #open it there
self.loaded_history = dic
para = pd.read_excel(temporaryfile,sheet_name='Parameters')
print(temporaryfile)
#delete the tempfile
os.remove(temporaryfile)
self.loaded_para = para
tries = 16
except:
time.sleep(1.5)
tries+=1
except Exception as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Check if dic exists now
try:
keys = list(dic.keys())
except Exception as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
#sort the list alphabetically
keys_ = [l.lower() for l in keys]
ind_sort = np.argsort(keys_)
keys = list(np.array(keys)[ind_sort])
#First keys should always be acc,loss,val_acc,val_loss -in this order
keys_first = ["acc","loss","val_acc","val_loss"]
for i in range(len(keys_first)):
if keys_first[i] in keys:
ind = np.where(np.array(keys)==keys_first[i])[0][0]
if ind!=i:
del keys[ind]
keys.insert(i,keys_first[i])
#Lastly check if there is "Saved" or "Time" present and shift it to the back
keys_last = ["Saved","Time"]
for i in range(len(keys_last)):
if keys_last[i] in keys:
ind = np.where(np.array(keys)==keys_last[i])[0][0]
if ind!=len(keys):
del keys[ind]
keys.append(keys_last[i])
self.tableWidget_HistoryItems.setColumnCount(len(keys)+1) #+1 because of "Show saved only"
#for each key, put a checkbox on the tableWidget_HistoryInfo_pop
rowPosition = self.tableWidget_HistoryItems.rowCount()
if rowPosition==0:
self.tableWidget_HistoryItems.insertRow(0)
else:
rowPosition=0
for columnPosition in range(len(keys)):#(2,4):
key = keys[columnPosition]
item = QtWidgets.QTableWidgetItem(str(key))#("item {0} {1}".format(rowNumber, columnNumber))
item.setBackground(QtGui.QColor(self.colorsQt[columnPosition]))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.tableWidget_HistoryItems.setItem(rowPosition, columnPosition, item)
#One checkbox at the end to switch on/of to show only the models that are saved
columnPosition = len(keys)
item = QtWidgets.QTableWidgetItem("Show saved only")#("item {0} {1}".format(rowNumber, columnNumber))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.tableWidget_HistoryItems.setItem(rowPosition, columnPosition, item)
self.tableWidget_HistoryItems.resizeColumnsToContents()
self.tableWidget_HistoryItems.resizeRowsToContents()
def history_tab_get_model_path(self):#Let user define a model he would like to convert
#pushButton_LoadModel
#Open a QFileDialog
filepath = QtWidgets.QFileDialog.getOpenFileName(self, 'Select a trained model you want to convert', Default_dict["Path of last model"],"Keras Model file (*.model)")
filepath = filepath[0]
if os.path.isfile(filepath):
self.model_2_convert = filepath
path, filename = os.path.split(filepath)
try:
modelindex = filename.split(".model")[0]
modelindex = int(modelindex.split("_")[-1])
except:
modelindex = np.nan
self.textBrowser_SelectedModelInfo.setText("Error loading model")
return
text = "Loaded model: "+filename+"\nModelindex: "+str(modelindex)+"\nFile is located in: "+filepath
self.textBrowser_SelectedModelInfo.setText(text)
def history_tab_convertModel(self):
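#Convert the model defined in self.model_2_convert from the selected source format to the
#selected target format; each branch calls the corresponding aid_dl.convert_* helper and
#reports success via a message box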
#Check if there is text in textBrowser_SelectedModelInfo
path = self.model_2_convert
try:
os.path.isfile(path)
except:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No file defined!")
msg.setWindowTitle("No file defined!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if not os.path.isfile(path):
#text_path = "\nFile not found!:"+path+"\nProbably the .model was deleted or not saved"
#self.pushButton_convertModel.setEnabled(False)
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("\nFile not found!:"+path+"\nProbably the .model was deleted or not saved")
msg.setWindowTitle("File not found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#If the source format is Keras TensorFlow:
source_format = str(self.combobox_initial_format.currentText())
target_format = str(self.comboBox_convertTo.currentText()) #What is the target format?
##TODO: All conversion methods to multiprocessing functions!
def conversion_successful_msg(text):#Enable the Convert to .nnet button
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(text)
msg.setWindowTitle("Successfully converted model!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
##################Keras TensorFlow -> .nnet############################
if target_format==".nnet" and source_format=="Keras TensorFlow":
ConvertToNnet = 1
worker = Worker(self.history_tab_convertModel_nnet_worker,ConvertToNnet)
def get_model_keras_from_worker(dic):
self.model_keras = dic["model_keras"]
worker.signals.history.connect(get_model_keras_from_worker)
def conversion_successful(i):#Enable the Convert to .nnet button
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "Conversion Keras TensorFlow -> .nnet done"
msg.setText(text)
msg.setWindowTitle("Successfully converted model!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#self.pushButton_convertModel.setEnabled(True)
worker.signals.history.connect(conversion_successful)
self.threadpool.start(worker)
##################Keras TensorFlow -> Frozen .pb#######################
elif target_format=="Frozen TensorFlow .pb" and source_format=="Keras TensorFlow":
#target filename should be like source +_frozen.pb
path_new = os.path.splitext(path)[0] + "_frozen.pb"
aid_dl.convert_kerastf_2_frozen_pb(path,path_new)
text = "Conversion Keras TensorFlow -> Frozen .pb is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> Optimized .pb####################
elif target_format=="Optimized TensorFlow .pb" and source_format=="Keras TensorFlow":
path_new = os.path.splitext(path)[0] + "_optimized.pb"
aid_dl.convert_kerastf_2_optimized_pb(path,path_new)
text = "Conversion Keras TensorFlow -> Optimized .pb is done"
conversion_successful_msg(text)
####################Frozen -> Optimized .pb############################
elif target_format=="Optimized TensorFlow .pb" and source_format=="Frozen TensorFlow .pb":
path_new = os.path.splitext(path)[0] + "_optimized.pb"
aid_dl.convert_frozen_2_optimized_pb(path,path_new)
text = "Conversion Frozen -> Optimized .pb is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> ONNX####################
elif target_format=="ONNX (via keras2onnx)" and source_format=="Keras TensorFlow":
path_new = os.path.splitext(path)[0] + ".onnx"
aid_dl.convert_kerastf_2_onnx(path,path_new)
text = "Conversion Keras TensorFlow -> ONNX (via keras2onnx) is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> ONNX via MMdnn####################
elif target_format=="ONNX (via MMdnn)" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_onnx_mmdnn(path)
text = "Conversion Keras TensorFlow -> ONNX (via MMdnn) is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> PyTorch Script####################
elif target_format=="PyTorch Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"pytorch")
text = "Conversion Keras TensorFlow -> PyTorch Script is done. You can now use this script and the saved weights to build the model using your PyTorch installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> Caffe Script####################
elif target_format=="Caffe Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"caffe")
text = "Conversion Keras TensorFlow -> Caffe Script is done. You can now use this script and the saved weights to build the model using your Caffe installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> CNTK Script####################
elif target_format=="CNTK Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"cntk")
text = "Conversion Keras TensorFlow -> CNTK Script is done. You can now use this script and the saved weights to build the model using your CNTK installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> mxnet Script####################
elif target_format=="MXNet Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"mxnet")
text = "Conversion Keras TensorFlow -> MXNet Script is done. You can now use this script and the saved weights to build the model using your MXNet installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> onnx Script####################
elif target_format=="ONNX Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"onnx")
text = "Conversion Keras TensorFlow -> ONNX Script is done. You can now use this script and the saved weights to build the model using your ONNX installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> TensorFlow Script####################
elif target_format=="TensorFlow Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"tensorflow")
text = "Conversion Keras TensorFlow -> TensorFlow Script is done. You can now use this script and the saved weights to build the model using your Tensorflow installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> Keras Script####################
elif target_format=="Keras Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"keras")
text = "Conversion Keras TensorFlow -> Keras Script is done. You can now use this script and the saved weights to build the model using your Keras installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> CoreML####################
elif "CoreML" in target_format and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_coreml(path)
text = "Conversion Keras TensorFlow -> CoreML is done."
conversion_successful_msg(text)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Not implemeted (yet)")
msg.setWindowTitle("Not implemeted (yet)")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#If that worked without error, save the filepath for next time
Default_dict["Path of last model"] = os.path.split(path)[0]
aid_bin.save_aid_settings(Default_dict)
def history_tab_convertModel_nnet_worker(self,ConvertToNnet,progress_callback,history_callback):
#Define a new session -> Necessary for threading in TensorFlow
#with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
with tf.Session() as sess:
path = self.model_2_convert
try:
model_keras = load_model(path,custom_objects=aid_dl.get_custom_metrics())
except:
model_keras = load_model(path)
dic = {"model_keras":model_keras}
history_callback.emit(dic)
progress_callback.emit(1)
if ConvertToNnet==1:
#Since this happened in a thread, TensorFlow cant access it anywhere else
#Therefore perform Conversion to nnet right away:
model_config = model_keras.get_config()#["layers"]
if type(model_config)==dict:
model_config = model_config["layers"]#for keras version>2.2.3, there is a change in the output of get_config()
#Convert model to theano weights format (Only necessary for CNNs)
for layer in model_keras.layers:
if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
original_w = K.get_value(layer.W)
converted_w = convert_kernel(original_w)
K.set_value(layer.W, converted_w)
nnet_path, nnet_filename = os.path.split(self.model_2_convert)
nnet_filename = nnet_filename.split(".model")[0]+".nnet"
out_path = os.path.join(nnet_path,nnet_filename)
aid_dl.dump_to_simple_cpp(model_keras=model_keras,model_config=model_config,output=out_path,verbose=False)
# sess.close()
# try:
# aid_dl.reset_keras()
# except:
# print("Could not reset Keras (1)")
def history_tab_ConvertToNnet(self):
print("Not used")
# model_keras = self.model_keras
# model_config = model_keras.get_config()["layers"]
# #Convert model to theano weights format (Only necesary for CNNs)
# for layer in model_keras.layers:
# if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
# original_w = K.get_value(layer.W)
# converted_w = convert_kernel(original_w)
# K.set_value(layer.W, converted_w)
#
# nnet_path, nnet_filename = os.path.split(self.model_2_convert)
# nnet_filename = nnet_filename.split(".model")[0]+".nnet"
# out_path = os.path.join(nnet_path,nnet_filename)
# aid_dl.dump_to_simple_cpp(model_keras=model_keras,model_config=model_config,output=out_path,verbose=False)
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("Successfully converted model and saved to\n"+out_path)
# msg.setWindowTitle("Successfully converted model!")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
# self.pushButton_convertModel.setEnabled(False)
#TODO
def test_nnet(self):
#I need a function which calls a cpp app that uses the nnet and applies
#it on a random image.
#The same image is also used as input the the original .model and
#both results are then compared
print("Not implemented yet")
print("Placeholder")
print("Building site")
def actionDocumentation_function(self):
icon = QtGui.QImage(os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff))
icon = QtGui.QPixmap(icon).scaledToHeight(32, QtCore.Qt.SmoothTransformation)
msg = QtWidgets.QMessageBox()
msg.setIconPixmap(icon)
text = "Currently, there is no detailed written documentation. AIDeveloper instead makes strong use of tooltips."
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Documentation")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def actionSoftware_function(self):
if sys.platform == "win32":
plat = "win"
elif sys.platform=="darwin":
plat = "mac"
elif sys.platform=="linux":
plat = "linux"
else:
print("Unknown Operating system")
plat = "Win"
dir_deps = os.path.join(dir_root,"aid_dependencies_"+plat+".txt")#dir to aid_dependencies
f = open(dir_deps, "r")
text_modules = f.read()
f.close()
icon = QtGui.QImage(os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff))
icon = QtGui.QPixmap(icon).scaledToHeight(32, QtCore.Qt.SmoothTransformation)
msg = QtWidgets.QMessageBox()
msg.setIconPixmap(icon)
text = "<html><head/><body><p>AIDeveloper "+str(VERSION)+"<br>"+sys.version+"<br>Click 'Show Details' to retrieve a list of all Python packages used."+"<br>AID_GPU uses CUDA (NVIDIA) to facilitate GPU processing</p></body></html>"
msg.setText(text)
msg.setDetailedText(text_modules)
msg.setWindowTitle("Software")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def actionAbout_function(self):
icon = QtGui.QImage(os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff))
icon = QtGui.QPixmap(icon).scaledToHeight(32, QtCore.Qt.SmoothTransformation)
msg = QtWidgets.QMessageBox()
msg.setIconPixmap(icon)
text = "AIDeveloper is written and maintained by <NAME>. Use <EMAIL> to contact the main developer if you find bugs or if you wish a particular feature. Icon theme 2 was mainly designed and created by <NAME>."
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("About")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def actionLoadSession_function(self):
#This function should allow to select and load a metafile and
#Put the GUI into the corresponding state (place the files in the table, check Train/Valid)
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open meta-data', Default_dict["Path of last model"],"AIDeveloper Meta or session file (*meta.xlsx *session.xlsx)")
filename = filename[0]
if len(filename)==0:
return
xlsx = pd.ExcelFile(filename)
UsedData = pd.read_excel(xlsx,sheet_name="UsedData")
Files = list(UsedData["rtdc_path"])
file_exists = [os.path.exists(url) for url in Files]
ind_true = np.where(np.array(file_exists)==True)[0]
UsedData_true = UsedData.iloc[ind_true]
Files_true = list(UsedData_true["rtdc_path"]) #select the indices that are valid
#Add stuff to table_dragdrop
rowPosition = int(self.table_dragdrop.rowCount())
self.dataDropped(Files_true)
#update the index, train/valid checkbox and shuffle checkbox
for i in range(len(Files_true)):
#set the index (celltype)
try:
index = int(np.array(UsedData_true["class"])[i])
except:
index = int(np.array(UsedData_true["index"])[i])
print("You are using an old version of AIDeveloper. Consider upgrading")
self.table_dragdrop.cellWidget(rowPosition+i, 1).setValue(index)
#is it checked for train or valid?
trorvalid = str(np.array(UsedData_true["TrainOrValid"])[i])
if trorvalid=="Train":
self.table_dragdrop.item(rowPosition+i, 2).setCheckState(QtCore.Qt.Checked)
elif trorvalid=="Valid":
self.table_dragdrop.item(rowPosition+i, 3).setCheckState(QtCore.Qt.Checked)
#how many cells/epoch during training or validation?
try:
nr_events_epoch = str(np.array(UsedData_true["nr_events_epoch"])[i])
except:
nr_events_epoch = str(np.array(UsedData_true["nr_cells_epoch"])[i])
self.table_dragdrop.item(rowPosition+i, 6).setText(nr_events_epoch)
#Shuffle or not?
shuffle = bool(np.array(UsedData_true["shuffle"])[i])
if shuffle==False:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Unchecked)
#Set Cells/Epoch to not editable
item = self.table_dragdrop.item(rowPosition+i, 6)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
else:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Checked)
#zoom_factor = float(np.array(UsedData_true["zoom_factor"])[i])
zoom_factor = str(np.array(UsedData_true["zoom_factor"])[i])
self.table_dragdrop.item(rowPosition+i, 9).setText(zoom_factor)
#Now take care of missing data
#Take care of missing files (they might have been moved to a different location)
ind_false = np.where(np.array(file_exists)==False)[0]
#Files_false = list(UsedData_false["rtdc_path"]) #select the indices that are valid
if len(ind_false)>0:
UsedData_false = UsedData.iloc[ind_false]
Files_false = list(UsedData_false["rtdc_path"]) #select the indices that are valid
self.dataDropped(Files_false)
self.user_selected_path = None
#Create popup that informs user that there is missing data and let him specify a location
#to search for the missing files
def add_missing_files():
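#Let the user pick a directory; aid_bin.find_files then searches it for files that match
#the stored hashes and the recovered paths are appended to table_dragdrop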
filename = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select directory', Default_dict["Path of last model"])
user_selected_path = filename
if len(filename)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Invalid directory")
msg.setWindowTitle("Invalid directory")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#get the hashes
hashes = list(np.array(UsedData_false["hash"])) #UsedData_false already only contains the missing files
paths = list(np.array(UsedData_false["rtdc_path"]))
paths_new,info = aid_bin.find_files(user_selected_path,paths,hashes)
text = ('\n'.join([str(a) +"\t"+ b for a,b in zip(paths_new,info)]))
self.textBrowser_Info_pop2.setText(text)
#Add stuff to table_dragdrop
rowPosition = int(self.table_dragdrop.rowCount())
self.dataDropped(paths_new)
for i in range(len(paths_new)):
#set the index (celltype)
try:
index = int(np.array(UsedData_false["class"])[i])
except:
index = int(np.array(UsedData_false["index"])[i])
print("You are using an old version of AIDeveloper. Consider upgrading")
self.table_dragdrop.cellWidget(rowPosition+i, 1).setValue(index)
#is it checked for train or valid?
trorvalid = str(np.array(UsedData_false["TrainOrValid"])[i])
if trorvalid=="Train":
self.table_dragdrop.item(rowPosition+i, 2).setCheckState(QtCore.Qt.Checked)
elif trorvalid=="Valid":
self.table_dragdrop.item(rowPosition+i, 3).setCheckState(QtCore.Qt.Checked)
#how many cells/epoch during training or validation?
try:
nr_events_epoch = str(np.array(UsedData_false["nr_events_epoch"])[i])
except:
nr_events_epoch = str(np.array(UsedData_false["nr_cells_epoch"])[i])
print("You are using an old version of AIDeveloper. Consider upgrading")
self.table_dragdrop.item(rowPosition+i, 6).setText(nr_events_epoch)
#Shuffle or not?
shuffle = bool(np.array(UsedData_false["shuffle"])[i])
if shuffle==False:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Unchecked)
#Set Cells/Epoch to not editable
item = self.table_dragdrop.item(rowPosition+i, 6)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
else:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Checked)
#zoom_factor = float(np.array(UsedData_false["zoom_factor"])[i])
zoom_factor = str(np.array(UsedData_false["zoom_factor"])[i])
self.table_dragdrop.item(rowPosition+i, 9).setText(zoom_factor)
self.w_pop2 = MyPopup()
self.gridLayout_w_pop2 = QtWidgets.QGridLayout(self.w_pop2)
self.gridLayout_w_pop2.setObjectName("gridLayout_w_pop2")
self.verticalLayout_w_pop2 = QtWidgets.QVBoxLayout()
self.verticalLayout_w_pop2.setObjectName("verticalLayout_w_pop2")
self.horizontalLayout_w_pop2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_w_pop2.setObjectName("horizontalLayout_w_pop2")
self.pushButton_Close_pop2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_Close_pop2.setObjectName("pushButton_Close_pop2")
self.pushButton_Close_pop2.clicked.connect(self.w_pop2.close)
self.horizontalLayout_w_pop2.addWidget(self.pushButton_Close_pop2)
self.pushButton_Search_pop2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_Search_pop2.clicked.connect(add_missing_files)
self.pushButton_Search_pop2.setObjectName("pushButton_Search")
self.horizontalLayout_w_pop2.addWidget(self.pushButton_Search_pop2)
self.verticalLayout_w_pop2.addLayout(self.horizontalLayout_w_pop2)
self.textBrowser_Info_pop2 = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser_Info_pop2.setObjectName("textBrowser_Info_pop2")
self.verticalLayout_w_pop2.addWidget(self.textBrowser_Info_pop2)
self.gridLayout_w_pop2.addLayout(self.verticalLayout_w_pop2, 0, 0, 1, 1)
self.w_pop2.setWindowTitle("There are missing files. Do you want to search for them?")
self.pushButton_Close_pop2.setText("No")
self.pushButton_Search_pop2.setText("Define folder to search files")
self.w_pop2.show()
#Ask user if only data, or the full set of parameters should be loaded
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
msg.setText(tooltips["msg_loadSession"])
msg.setWindowTitle("Load only data table all parameters?")
msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Save)# | QtWidgets.QMessageBox.Cancel)
dataonly = msg.button(QtWidgets.QMessageBox.Yes)
dataonly.setText('Data table only')
allparams = msg.button(QtWidgets.QMessageBox.Save)
allparams.setText('Data and all parameters')
# cancel = msg.button(QtGui.QMessageBox.Cancel)
# cancel.setText('Cancel')
msg.exec_()
#Only update the data table.
if msg.clickedButton()==dataonly:
pass
#Load the parameters
elif msg.clickedButton()==allparams:
Parameters = pd.read_excel(xlsx,sheet_name="Parameters")
aid_frontend.load_hyper_params(self,Parameters)
# if msg.clickedButton()==cancel: #show image and heatmap overlay
# return
#If all this run without error, save the path.
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
#Update the overview-box
if self.groupBox_DataOverview.isChecked()==True:
self.dataOverviewOn()
def actionSaveSession_function(self):
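#Save the current session (selected files, data overview and all hyper-parameters)
#to a *_session.xlsx file, which can later be restored via actionLoadSession_function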
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save session', Default_dict["Path of last model"],"AIDeveloper Session file (*_session.xlsx)")
filename = filename[0]
path, fname = os.path.split(filename)
if len(fname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid filename was chosen.")
msg.setWindowTitle("No valid filename was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if fname.endswith(".xlsx"):
fname = fname.split(".xlsx")[0]
if fname.endswith("_session"):
fname = fname.split("_session")[0]
if fname.endswith("_meta"):
fname = fname.split("_meta")[0]
if fname.endswith(".model"):
fname = fname.split(".model")[0]
if fname.endswith(".arch"):
fname = fname.split(".arch")[0]
#add the suffix _session.xlsx
if not fname.endswith("_session.xlsx"):
fname = fname +"_session.xlsx"
filename = os.path.join(path,fname)
writer = pd.ExcelWriter(filename, engine='openpyxl')
#Used files go to a separate sheet on the -session.xlsx
SelectedFiles = self.items_clicked()
SelectedFiles_df = pd.DataFrame(SelectedFiles)
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df = self.get_dataOverview()
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
#Get all hyper parameters
Para_dict = pd.DataFrame()
Para_dict["AIDeveloper_Version"]=VERSION,
Para_dict["model_zoo_version"]=model_zoo_version,
try:
Para_dict["OS"]=platform.platform(),
Para_dict["CPU"]=platform.processor(),
except:
Para_dict["OS"]="Unknown",
Para_dict["CPU"]="Unknown",
Para_dict = aid_frontend.get_hyper_params(Para_dict,self)
Para_dict.to_excel(writer,sheet_name='Parameters')
writer.save()
writer.close()
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Successfully saved as "+filename)
msg.setWindowTitle("Successfully saved")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def actionClearList_function(self):
#Remove all items from dragdrop table
while (self.table_dragdrop.rowCount() > 0):
self.table_dragdrop.removeRow(0)
#reset ram
self.ram = dict()
#Remove all items from comboBox_chooseRtdcFile
self.comboBox_chooseRtdcFile.clear()
self.comboBox_selectData.clear()
if self.groupBox_DataOverview.isChecked()==True:
self.dataOverviewOn()
def actionRemoveSelected_function(self):
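#Remove the highlighted rows from table_dragdrop and the corresponding entries from the
#rtdc-file comboboxes; indices of the remaining selected rows are shifted after each removal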
#Which rows are highlighted?
rows_selected = np.array([index.row() for index in self.table_dragdrop.selectedIndexes()])
for row in (rows_selected):
self.table_dragdrop.removeRow(row)
self.comboBox_chooseRtdcFile.removeItem(row)
self.comboBox_selectData.removeItem(row)
#if there are rows below this row, they will move up one step:
ind = np.where(np.array(rows_selected)>row)[0]
rows_selected[ind] -= 1
def actionSaveToPng_function(self):
#Which table items are selected?
rows_selected = np.array([index.row() for index in self.table_dragdrop.selectedIndexes()])
if len(rows_selected)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("Please first select rows in the table!")
msg.setWindowTitle("No rows selected")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Ask user to which folder the images should be written:
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save to .png/.jpg', Default_dict["Path of last model"],"Image file format (*.png *.jpg *.bmp *.eps *.gif *.ico *.icns)")
filename = filename[0]
if len(filename)==0:
return
filename_X, file_extension = os.path.splitext(filename)#divide into path and file_extension if possible
#Check if the chosen file_extension is valid
if not file_extension in [".png",".jpg",".bmp",".eps",".gif",".ico",".icns"]:
print("Invalid file extension detected. Will use .png instead.")
file_extension = ".png"
#Check the chosen export-options
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
elif bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Plase choose a different Export-option in ->Options->Export")
msg.setWindowTitle("Export is turned off!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if save_cropped==True:
#Collect information for image processing
cropsize = self.spinBox_imagecrop.value()
color_mode = str(self.comboBox_loadedRGBorGray.currentText())
#zoom_methods = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = np.where(np.array(zoom_methods)==True)[0]
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
index = 0
for row in (rows_selected):
#get the corresponding rtdc_path
rtdc_path = str(self.table_dragdrop.cellWidget(row, 0).text())
nr_events = None #no number needed as we take all images (replace=False in gen_crop_img)
zoom_factor = float(self.table_dragdrop.item(row, 9).text())
gen = aid_img.gen_crop_img(cropsize,rtdc_path,nr_events=nr_events,replace=False,random_images=False,zoom_factor=zoom_factor,zoom_order=zoom_order,color_mode=color_mode,padding_mode='constant')
images = next(gen)[0]
#Save the images data to .png/.jpeg...
for img in images:
img = PIL.Image.fromarray(img)
img.save(filename_X+"_"+str(index)+file_extension)
index+=1
if save_cropped==False:#save the original images without pre-processing
index = 0
for row in (rows_selected):
rtdc_path = str(self.table_dragdrop.cellWidget(row, 0).text())
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
images = rtdc_ds["events"]["image"] #get the images
#Save the images data to .png/.jpeg...
for img in images:
img = PIL.Image.fromarray(img)
img.save(filename_X+"_"+str(index)+file_extension)
index+=1
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def actionRemoveSelectedPeaks_function(self):
#Which rows are highlighted?
rows_selected = np.array([index.row() for index in self.tableWidget_showSelectedPeaks.selectedIndexes()])
#delete each row only once :)
rows_selected = np.unique(rows_selected)
for row in (rows_selected):
self.tableWidget_showSelectedPeaks.removeRow(row)
#if there are rows below this row, they will move up one step:
ind = np.where(np.array(rows_selected)>row)[0]
rows_selected[ind] -=1
#Update the widget_showSelectedPeaks
self.update_peak_plot()
def actionRemoveAllPeaks_function(self):
#Remove all items from tableWidget_showSelectedPeaks
while (self.tableWidget_showSelectedPeaks.rowCount() > 0):
self.tableWidget_showSelectedPeaks.removeRow(0)
def actionDataToRamNow_function(self):
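#Crop all selected .rtdc files once and keep the resulting image arrays in self.ram,
#so that training can later read the data from memory instead of from disk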
self.statusbar.showMessage("Moving data to RAM")
#collect the selected files and the image processing settings (color mode, zoom, crop)
SelectedFiles = self.items_clicked()
color_mode = self.get_color_mode()
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
#Get the user-defined cropping size
crop = int(self.spinBox_imagecrop.value())
#Make the cropsize a bit larger since the images will later be rotated
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),crop,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Successfully moved data to RAM")
msg.setWindowTitle("Moved Data to RAM")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
self.statusbar.showMessage("")
###########################################################################
###########################################################################
###########################################################################
###########################################################################
#######################Functions for Assess model tab######################
def assessmodel_tab_load_model(self):
#Get the requested model-name from the chosen metafile
#Open a QFileDialog
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Select a trained model you want to assess', Default_dict["Path of last model"],"Keras Model file (*.model)")
filename = filename[0]
if os.path.isfile(filename):
#Put this path on the Assess Model tab
self.lineEdit_LoadModel_2.setText(filename)
#Save the path to a variable that is then used by history_tab_convertModel_nnet_worker
self.load_model_path = filename
#Get the modelindex
path,filename = os.path.split(filename)
modelindex = filename.split(".model")[0]
modelindex = int(modelindex.split("_")[-1])
#Update the modelindex on the Assess Model tab
self.spinBox_ModelIndex_2.setValue(int(modelindex))
model_full_h5 = h5py.File(self.load_model_path, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
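#model_config is stored as a bytes attribute in the .model (hdf5) file; str(...)[2:-1]
#strips the b'...' wrapper so the remaining string can be parsed as JSON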
try: #Sequential Model
in_dim = model_config['config'][0]['config']['batch_input_shape']
except: #Functional Api
in_dim = model_config['config']["layers"][0]["config"]["batch_input_shape"]
try: #Sequential Model
out_dim = model_config['config'][-2]['config']['units']
except: #Functional Api
out_dim = model_config['config']["layers"][-2]["config"]["units"]
self.spinBox_Crop_2.setValue(int(in_dim[-2]))
self.spinBox_OutClasses_2.setValue(int(out_dim))
print("input dimension:"+str(in_dim))
#Adjust the Color mode in the UI:
channels = in_dim[-1] #TensorFlow: channels in last dimension
if channels==1:
#Set the combobox on Assess model tab to Grayscale; just info for user
index = self.comboBox_loadedRGBorGray.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_loadedRGBorGray.setCurrentIndex(index)
#Check the currently set color_mode. This is important since images are loaded accordingly
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
#Set the combobox on Assess model tab to RGB; just info for user
index = self.comboBox_loadedRGBorGray.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_loadedRGBorGray.setCurrentIndex(index)
#Check the currently set color_mode. This is important since images are loaded accordingly
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Channel dimensions of model ("+str(channels)+" channels) is not supported. Only 1 or 3 channels are allowed.")
msg.setWindowTitle("Unsupported channel dimension")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
modelindex = int(self.spinBox_ModelIndex_2.value())
path,fname = os.path.split(self.load_model_path)
fname = fname.split(str(modelindex)+".model")[0]+"meta.xlsx"
metafile_path = os.path.join(path,fname)
try:
img_processing_settings = aid_img.load_model_meta(metafile_path)
self.img_processing_settings = img_processing_settings
model_type = str(img_processing_settings["model_type"].values[0])
normalization_method = str(img_processing_settings["normalization_method"].values[0])
index = self.comboBox_Normalization_2.findText(normalization_method, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_Normalization_2.setCurrentIndex(index)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Unkown normalization method found in .meta file")
msg.setWindowTitle("Unkown normalization method")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.lineEdit_ModelSelection_2.setText(model_type)
except: #there is no such file, or the file cannot be opened
#Ask the user to choose the normalization method
self.lineEdit_ModelSelection_2.setText("Unknown")
self.comboBox_Normalization_2.setEnabled(True)
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Meta file not found/ Could not be read. Please specify the normalization method manually (dropdown menu)")
msg.setWindowTitle("No .meta available")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(self.load_model_path)[0]
aid_bin.save_aid_settings(Default_dict)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("File not found!:\nProbably the .model was deleted or not saved")
msg.setWindowTitle("File not found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
def inference_time_worker(self,progress_callback,history_callback):
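#Measure the single-image inference time of the loaded model on the selected device:
#after one warm-up prediction, run 10 timing rounds of nr_imgs predictions each and
#report the per-image time in ms (mean/median/min/max) via history_callback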
if self.radioButton_cpu.isChecked():
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
#Create config (define which device to use)
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
#Retrieve more Multi-GPU Options from Menubar:
cpu_merge = bool(self.actioncpu_merge.isEnabled())
cpu_relocation = bool(self.actioncpu_relocation.isEnabled())
cpu_weight_merge = bool(self.actioncpu_weightmerge.isEnabled())
#Initiate a fresh session
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
if deviceSelected=="Multi-GPU" and cpu_weight_merge==True:
with tf.device("/cpu:0"):
model_keras = load_model(self.load_model_path,custom_objects=aid_dl.get_custom_metrics())
else:
model_keras = load_model(self.load_model_path,custom_objects=aid_dl.get_custom_metrics())
#Multi-GPU
if deviceSelected=="Multi-GPU":
print("Adjusting the model for Multi-GPU")
model_keras = multi_gpu_model(model_keras, gpus=gpu_nr, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)#indicate the numbers of gpus that you have
#Get the model input dimensions
in_dim = np.array(model_keras.get_input_shape_at(0))
ind = np.where(in_dim==None)
in_dim[ind] = 1
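#dimensions that are undefined in the model (None, e.g. the batch dimension) are set to 1
#so that a random dummy image with a valid shape can be generated below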
nr_imgs = self.spinBox_inftime_nr_images.value()
nr_imgs = int(np.round(float(nr_imgs)/10.0))
#Warm up by predicting a single image
image = (np.random.randint(0,255,size=in_dim)).astype(np.float32)/255.0
model_keras.predict(image) # warm up
Times = []
for k in range(10):
image = (np.random.randint(0,255,size=in_dim)).astype(np.float32)/255.0
t1 = time.time()
for i in range(nr_imgs):#predict nr_imgs single images per timing round (10 rounds in total)
model_keras.predict(image)
t2 = time.time()
dt = (t2-t1)/(nr_imgs) #divide by nr_imgs to get time [s] per image
dt = dt*1000.0 #multiply by 1000 to change to ms range
dic = {"outp":str(round(dt,3))+"ms"}
history_callback.emit(dic)
Times.append(dt)
#Send out the Times
text = " [ms] Mean: "+str(round(np.mean(Times),3))+"; "+"Median: "+str(round(np.median(Times),3))+"; "+"Min: "+str(round(np.min(Times),3))+"; "+"Max: "+str(round(np.max(Times),3))
dic = {"outp":text}
history_callback.emit(dic)
progress_callback.emit(1) #when finished return one
self.threadpool_single_queue = 0 #reset the thread-counter
def inference_time(self):
if self.radioButton_cpu.isChecked():
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
deviceSelected = str(self.comboBox_gpu.currentText())
#Inform user that certain config is used for inference
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Will use "+deviceSelected+" for inference. To change bewtween CPU and GPU, use the options on the Build-Tab")
msg.setWindowTitle("CPU used for inference")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#Take the model path from the GUI
self.load_model_path = str(self.lineEdit_LoadModel_2.text())
if len(self.load_model_path)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a model path first")
msg.setWindowTitle("No model path found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Increase the thread-counter by one; only after finishing the thread, it will be reset to 0
self.threadpool_single_queue += 1
if self.threadpool_single_queue == 1:
worker = Worker(self.inference_time_worker)
def get_dt_from_worker(dic):
outp = dic["outp"]
self.lineEdit_InferenceTime.setText(outp)
worker.signals.history.connect(get_dt_from_worker)
self.threadpool_single.start(worker)
def update_check_worker(self,progress_callback,history_callback):
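"""
Worker function: query aid_bin.check_for_updates for available AIDeveloper
updates on GitHub and emit the resulting dictionary via history_callback.
"""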
#Retrieve information from GitHub
dic = aid_bin.check_for_updates(VERSION)
#dic = {"Errors":None,"latest_release":latest_release,"latest_release_url":url,"changelog":changelog}
history_callback.emit(dic)
progress_callback.emit(1) #when finished return one
self.threadpool_single_queue = 0 #reset the thread-counter
def actionUpdate_check_function(self):
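"""
Start 'update_check_worker' in a separate thread and show the result
(available major/minor updates) in a popup window.
"""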
#Increase the thread-counter by one; only after finishing the thread, it will be reset to 0
self.threadpool_single_queue += 1
if self.threadpool_single_queue == 1:
worker = Worker(self.update_check_worker)
def get_info_from_worker(dic):
#Create a popup window
self.popup_updates = MyPopup()
self.popup_updates_ui = aid_frontend.Ui_Updates()
self.popup_updates_ui.setupUi(self.popup_updates) #open a popup
if dic["Errors"]!=None:#if there is an errror (no internet...)
#display the error in the textbrowser
text = str(dic["Errors"])
elif dic["Errors"]==None:#No errors! Nice
latest_release = dic["latest_release"]
if latest_release=="You are up to date":
text = "Your major version of AIDeveloper is up-to-date. Check below if there are updates available for that major version. <br>Example: Your major version of AIDeveloper is 0.2.0, then all updates which start with 0.2.x will be compatible."
text = "<html><head/><body><p>"+text+"</p></body></html>"
else:
text = "There is a new major update available. To download, follow this link:"
text = text+"<br>"+"<a href ="+dic["latest_release_url"]+">"+dic["latest_release_url"]+"</a>"
text = text+"<br>"+dic["changelog"]
text = text+"<br>Major updates need to be downloaded and installed manually. After that, you can install minor updates (which correspond to that major version) using the menu below."
text = "<html><head/><body><p>"+text+"</p></body></html>"
#Fill info text (on top of Update Popup window)
self.popup_updates_ui.textBrowser_majorVersionInfo.setText(text)
#Fill lineEdit "Your version"
self.popup_updates_ui.lineEdit_yourVersion.setText(VERSION)
#Add updates to the comboBox
self.popup_updates_ui.comboBox_updatesOndevice.addItems(dic["tags_update_ondevice"])
self.popup_updates_ui.comboBox_updatesOnline.addItems(dic["tags_update_online"])
self.popup_updates.show()
self.popup_updates_ui.pushButton_installOndevice.clicked.connect(lambda: self.update_aideveloper("local"))
self.popup_updates_ui.pushButton_installOnline.clicked.connect(lambda: self.update_aideveloper("github"))
self.popup_updates_ui.pushButton_findFile.clicked.connect(self.update_addLocalFile)
worker.signals.history.connect(get_info_from_worker)
self.threadpool_single.start(worker)
def actionTerminology_function(self):
#show a messagebox with link to terminology github page
icon = QtGui.QImage(os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff))
icon = QtGui.QPixmap(icon).scaledToHeight(32, QtCore.Qt.SmoothTransformation)
msg = QtWidgets.QMessageBox()
msg.setIconPixmap(icon)
text = "To learn more about machine learning/ deep learning specific terminology, please visit:<br>"
url = "<a href=https://github.com/maikherbig/AIDeveloper/tree/master/Terminology>https://github.com/maikherbig/AIDeveloper/tree/master/Terminology</a>"
text = "<html><head/><body><p>"+text+url+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("ML/DL Terminology")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def update_aideveloper(self,source):
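"""
Install an update of AIDeveloper, either from a local zip file
(source=="local") or downloaded from GitHub (source=="github").
aid_bin.update_from_zip creates a backup of the current version.
"""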
#retrieve the current text on comboBox_availableUpdates
if source=="local":
item_text = str(self.popup_updates_ui.comboBox_updatesOndevice.currentText())
elif source=="github":
item_text = str(self.popup_updates_ui.comboBox_updatesOnline.currentText())
#Length of the version name should not be 0
if len(item_text)==0:
e = "No update available"
aid_frontend.message(e)
return
if source=="local":
#Complete file path (item_text not enough)
item_path = "AIDeveloper_"+item_text+".zip"
item_path = os.path.join(dir_root,item_path)
elif source=="github":
if item_text=="Bleeding edge":
#user want the most recent scripts from GitHub.
downloadprocess = aid_bin.download_aid_repo()
else:
#item_text is a tag of the version. Use tag to download the zip
downloadprocess = aid_bin.download_aid_update(item_text)
#Check if download was successful
if downloadprocess["success"]==False:#if the download was not done show message
message = "Download was not conducted. Probably, the file is already present in:/n"+downloadprocess["path_save"]
aid_frontend.message(message,msg_type="Warning")
return
#Retrieve the path of the zip file (contains the update files)
item_path = downloadprocess["path_save"]
if not os.path.isfile(item_path):#in case there is no file (zip file not created...)
e = "Update requires a zip file. Could not find/create such a file!"
aid_frontend.message(e)
return
#Perform the update (including backup of current version)
path_backup = aid_bin.update_from_zip(item_path,VERSION)
#message: Installation successful-> need to restart AID
msg = "Update successful. Please restart AIDeveloper. A backup of your previous version is stored in:\n"+path_backup
aid_frontend.message(msg,msg_type="Information")
def update_addLocalFile(self):
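"""
Let the user pick a local update (.zip) file, copy it to dir_root using the
naming convention "AIDeveloper_"+tag_name+".zip" and add that tag to the
on-device update dropdown.
"""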
#open a filedialog
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Choose update file', dir_root,"AID update file (*.zip)")
filename = filename[0]
print(filename)
#Check if the file is a zip
if not filename.endswith(".zip"):#file has to be .zip
text = "Chosen file is not a .zip archive!"
aid_frontend.message(msg_text=text,msg_type="Error")
#Check that file exists
if not os.path.isfile(filename):
text = "File not found"
aid_frontend.message(msg_text=text,msg_type="Error")
return
base,_ = os.path.split(filename)
#ensure that filename obeys the name convention: "AIDeveloper_"+tag_name+".zip"
tag_name = datetime.datetime.now().strftime("%Y%m%d_%H-%M-%S")+"-update"
save_name = "AIDeveloper_"+tag_name+".zip"
save_name = os.path.join(dir_root,save_name)
#copy the file to dir_root
shutil.copy(filename,save_name)
#append tag_name to combobox
self.popup_updates_ui.comboBox_updatesOndevice.addItem(tag_name)
text = "Update is now availabele via the Dropdown menu on the left ("+tag_name+")."
text += " The file was copied to:\n"
text += save_name
aid_frontend.message(msg_text=text,msg_type="Information")
def get_validation_data_from_clicked(self,get_normalized=True):
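"""
Load the validation data of the files clicked on the 'Build'-tab, crop and
(optionally) normalize the images, and store everything in
self.ValidationSet. Returns 1 on success and 0 if no validation data was
selected or the resulting dataset is empty.
"""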
#Check, if files were clicked
SelectedFiles = self.items_clicked_no_rtdc_ds()
######################Load the Validation Data################################
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
if len(ind)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No validation data was selected. Please use tab 'Build' and drag/drop to load data")
msg.setWindowTitle("No validation data selected")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return 0
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
nr_events_epoch_valid = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_valid]
rtdc_path_valid = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_valid]
zoom_factors_valid = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_valid]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_valid = [selectedfile["shuffle"] for selectedfile in SelectedFiles_valid]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles])
if len(xtra_in)>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#Read other model properties from the Ui
norm = self.comboBox_Normalization_2.currentText()
norm = str(norm)
#if normalization method needs mean/std of training set, the metafile needs to be loaded:
if norm == "StdScaling using mean and std of all training data":
modelindex = int(self.spinBox_ModelIndex_2.value())
path,fname = os.path.split(self.load_model_path)
fname = fname.split(str(modelindex)+".model")[0]+"meta.xlsx"
metafile_path = os.path.join(path,fname)
parameters = pd.read_excel(metafile_path,sheet_name='Parameters')
mean_trainingdata = parameters["Mean of training data used for scaling"]
std_trainingdata = parameters["Std of training data used for scaling"]
else:
mean_trainingdata = None
std_trainingdata = None
crop = int(self.spinBox_Crop_2.value())
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
#read self.ram to new variable ; DONT clear ram after since multiple assessments can run on the same data.
DATA = self.ram
#self.ram = dict() #DONT clear the ram here!
############Cropping#####################
X_valid,y_valid,Indices,Xtra_in = [],[],[],[]
for i in range(len(SelectedFiles_valid)):
if not self.actionDataToRam.isChecked():
#Replace=True means that individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else: #get a similar generator, using the ram-data
if len(DATA)==0:
#Replace=True means that individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
gen_valid = aid_img.gen_crop_img_ram(DATA,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,xtra_in=xtra_in) #Replace=True means that individual cells could occur several times
gen = next(gen_valid)
X_valid.append(gen[0])
y_valid.append(np.repeat(indices_valid[i],X_valid[-1].shape[0]))
Indices.append(gen[1]) #Cell index to track the event in the data-set(not cell-type!)
Xtra_in.append(gen[2])
X_valid_orig = [X.astype(np.uint8) for X in X_valid]
X_valid = np.concatenate(X_valid)
Xtra_in = np.concatenate(Xtra_in)
# dim = X_valid.shape
# if dim[2]!=crop:
# remove = int(dim[2]/2.0 - crop/2.0)
# #X_batch = X_batch[:,:,remove:-remove,remove:-remove] #crop to crop x crop pixels #Theano
# X_valid = X_valid[:,remove:-remove,remove:-remove] #crop to crop x crop pixels #TensorFlow
print("X_valid has following dimension:")
print(X_valid.shape)
y_valid = np.concatenate(y_valid)
if len(np.array(X_valid).shape)<3:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Discarded all events because too far at border of image (check zooming/cropping settings!)")
msg.setWindowTitle("Empty dataset!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return 0
if get_normalized == True:
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(X_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(X_valid,norm)
else:
X_valid = None
dic = {"SelectedFiles_valid":SelectedFiles_valid,"nr_events_epoch_valid":nr_events_epoch_valid,"rtdc_path_valid":rtdc_path_valid,"X_valid_orig":X_valid_orig,"X_valid":X_valid,"y_valid":y_valid,"Indices":Indices,"Xtra_in":Xtra_in}
self.ValidationSet = dic
return 1
def export_valid_to_rtdc(self):
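"""
Export the current validation set to an .rtdc file (images) and a
corresponding _Valid_Labels.txt file (labels).
"""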
if not type(self.ValidationSet) is type(None): #If ValidationSet is not none, there has been a ValidationSet loaded already
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Re-used validation data (from RAM) loaded earlier. If that is not good, please check and uncheck a file on 'Build' tab. This will delete the validation data from RAM")
msg.setWindowTitle("Re-Used data")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
worked = 1
else: #Otherwise get the validation data from the stuff that is clicked on 'Build'-Tab
worked = self.get_validation_data_from_clicked() #after that, self.ValidationSet will exist
if worked==0:
return
rtdc_path_valid = self.ValidationSet["rtdc_path_valid"]
X_valid = []
X_valid.append(self.ValidationSet["X_valid"][:,:,:,0])
X_valid_orig = self.ValidationSet["X_valid_orig"]
Xtra_in = self.ValidationSet["Xtra_in"]
Indices = self.ValidationSet["Indices"]
y_valid = self.ValidationSet["y_valid"]
#Get a filename from the user for the new file
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save to rtdc', Default_dict["Path of last model"],"rtdc file (*.rtdc)")
filename = filename[0]
if len(filename)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid filename was chosen.")
msg.setWindowTitle("No valid filename was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#add the suffix _Valid_Data.rtdc or _Valid_Labels.txt
if not filename.endswith(".rtdc"):
filename = filename +".rtdc"
filename_X = filename.split(".rtdc")[0]+"_Valid_Data.rtdc"
filename_y = filename.split(".rtdc")[0]+"_Valid_Labels.txt"
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
elif bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("You could choose a different Exporting option in ->Option->Export")
msg.setWindowTitle("Export is turned off!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
aid_bin.write_rtdc(filename_X,rtdc_path_valid,X_valid_orig,Indices,cropped=save_cropped,color_mode=self.get_color_mode(),xtra_in=Xtra_in)
np.savetxt(filename_y,y_valid.astype(int),fmt='%i')
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def import_valid_from_rtdc(self):
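"""
Load validation data from a *_Valid_Data.rtdc file (plus the corresponding
_Valid_Labels.txt), crop/normalize it according to the current model
settings and store it in self.ValidationSet.
"""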
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open Valid_Data.rtdc', Default_dict["Path of last model"],".rtdc file (*_Valid_Data.rtdc)")
filename = filename[0]
rtdc_path = filename
if len(filename)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid filename was chosen.")
msg.setWindowTitle("No valid filename was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Load the corresponding labels
filename_labels = filename.split("Data.rtdc")[0]+"Labels.txt"
if not os.path.isfile(filename_labels):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No corresponding _Labels.npy file found! Expected it here: "+filename_labels)
msg.setWindowTitle("No Labels found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
y_valid = np.loadtxt(filename_labels).astype(int)
#Inform user (statusbar message)
self.statusbar.showMessage("Loaded labels from "+filename_labels,5000)
#Read images from .rtdc file
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Load meta file
#filename_meta = filename.split("Valid_Data.rtdc")[0]+"meta.xlsx"
#Make the Image dimensions matching the requirements of the model
model_in = int(self.spinBox_Crop_2.value())
model_out = int(self.spinBox_OutClasses_2.value())
color_mode = str(self.comboBox_loadedRGBorGray.currentText())
# if color_mode=='RGB': #User wants RGB images
# target_channels = 3
# if color_mode=='Grayscale': # User want to have Grayscale
# target_channels = 1
if model_in==1 and model_out==1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please first define a model. The validation data will then be cropped according to the required model-input size")
msg.setWindowTitle("No model defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
x_valid = np.array(rtdc_ds["events"]["image"])
#dim = x_valid.shape[1]
#channels = x_valid.shape[-1]
#Get further image processing settings from self.
zoom_factor = float(self.img_processing_settings["zoom_factor"].values[0])
zoom_interpol_method = str(self.img_processing_settings["zoom_interpol_method"].values[0])
padding_mode = str(self.img_processing_settings["padding_mode"].values[0])
#normalization_method = str(self.img_processing_settings["normalization_method"].values[0])
norm = self.comboBox_Normalization_2.currentText()
norm = str(norm)
mean_trainingdata = self.img_processing_settings["mean_trainingdata"].values[0]
std_trainingdata = self.img_processing_settings["std_trainingdata"].values[0]
gen_valid = aid_img.gen_crop_img(cropsize=model_in,rtdc_path=rtdc_path,random_images=False,zoom_factor=zoom_factor,zoom_order=zoom_interpol_method,color_mode=color_mode,padding_mode=padding_mode,xtra_in=False)
x_valid,index,xtra_valid = next(gen_valid)
#When object is too far at side of image, the frame is dropped.
#Consider this for y_valid
y_valid = y_valid[index]
if not model_in==x_valid.shape[-2]:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model input dimension ("+str(model_in)+"x"+str(model_in)+"pix) and validation data dimension ("+str(x_valid.shape)+") do not match")
msg.setWindowTitle("Wrong image dimension")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Normalize the images
X_valid_orig = np.copy(x_valid) #copy the cropped but non-normalized images
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(x_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(x_valid,norm)
Indices = np.array(range(X_valid.shape[0])) #those are just indices to identify single cells in the file ->not cell-type indices!
SelectedFiles_valid = None #[].append(rtdc_path)#
nr_events_epoch_valid = None
rtdc_h5 = h5py.File(rtdc_path, 'r')
try:
Xtra_in = np.array(rtdc_h5["xtra_in"])[index]
except:
Xtra_in = []
rtdc_h5.close() #close the hdf5
dic = {"SelectedFiles_valid":SelectedFiles_valid,"nr_events_epoch_valid":nr_events_epoch_valid,"rtdc_path_valid":[rtdc_path],"X_valid_orig":[X_valid_orig],"X_valid":X_valid,"y_valid":y_valid,"Indices":[Indices],"Xtra_in":Xtra_in}
self.ValidationSet = dic
self.statusbar.showMessage("Validation data loaded to RAM",5000)
#Update the table
#Prepare a table in tableWidget_Info
self.tableWidget_Info_2.setColumnCount(0)#Reset table
self.tableWidget_Info_2.setRowCount(0)#Reset table
self.tableWidget_Info_2.setColumnCount(4) #Four columns
nr_ind = len(set(y_valid)) #number of different labels ("indices")
nr_rows = nr_ind
self.tableWidget_Info_2.setRowCount(nr_rows)
#Set the header labels of the table
header_labels = ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info_2.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
rowPosition = 0
#Total nr of cells for each index
for index in np.unique(y_valid):
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 0, item)
#Get the validation files of that index
ind = np.where(y_valid==index)[0]
nr_events_epoch = len(ind)
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info_2.setItem(rowPosition, 1, item)
#Column for color
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, "")
item.setBackground(QtGui.QColor(self.colorsQt[index]))
self.tableWidget_Info_2.setItem(rowPosition, 2, item)
#Column for User specified name
item = QtWidgets.QTableWidgetItem()
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 3, item)
rowPosition += 1
self.tableWidget_Info_2.resizeColumnsToContents()
self.tableWidget_Info_2.resizeRowsToContents()
def cm_interaction(self,item):
"""
Grab validation data of a particular class, load the scores (model.predict)
and save images to .rtdc, or show them (user's decision).
"assess_model_plotting" has to be carried out first
"""
true_label = item.row()
predicted_label = item.column()
#If there is X_valid and y_valid on RAM, use it!
if not type(self.ValidationSet) is type(None): #If X_valid is not none, there has been X_valid loaded already
self.statusbar.showMessage("Re-used validation data (from RAM) loaded earlier. If that is not good, please check and uncheck a file on 'Build' tab. This will delete the validation data from RAM",2000)
else: #Otherwise get the validation data from the stuff that is clicked on 'Build'-Tab
self.get_validation_data_from_clicked() #after that, self.ValidationSet will exist
self.statusbar.showMessage("Loaded data corresponding to the clicked files on 'Build'-tab",2000)
rtdc_path_valid = self.ValidationSet["rtdc_path_valid"]
X_valid_orig = self.ValidationSet["X_valid_orig"] #cropped but non-normalized images
Indices = self.ValidationSet["Indices"]
y_valid = self.ValidationSet["y_valid"] #load the validation labels to a new variable
dic = self.Metrics #gives {"scores":scores,"pred":pred,"conc_target_cell":conc_target_cell,"enrichment":enrichment,"yield_":yield_}
if len(dic)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Data was altered. Please run 'Update Plots' again")
msg.setWindowTitle("Data has changed")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
pred = dic["pred"]
#get the length of each Index-list,
lengths = [len(l) for l in Indices]
starts = np.cumsum(lengths)
ToSave, y_valid_list, Indices_ = [],[],[] #list; store images remaining to indiv. .rtdc set in there
starts = np.array([0]+list(starts))
for i in range(len(lengths)):
y_val = y_valid[starts[i]:starts[i+1]]
pred_ = pred[starts[i]:starts[i+1]]
#update the indx to prepare for next iteration
#indx = lengths[i]
ind = np.where( (y_val==true_label) & (pred_==predicted_label) )[0] #select true_label cells and check which of them are classified as predicted_label
#Grab the corresponding images
ToSave.append(X_valid_orig[i][ind,:,:]) #get non-normalized X_valid to new variable
#X_valid_.append(X_valid[i][ind,:,:]) #get normalized/cropped images ready to run through the model
y_valid_list.append(y_val[ind])
Indices_.append(Indices[i][ind]) #get non-normalized X_valid to new variable
total_number_of_chosen_cells = [len(a) for a in y_valid_list]
total_number_of_chosen_cells = np.sum(np.array(total_number_of_chosen_cells))
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>Show images/heatmap or save to .rtdc/.png/.jpg?</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Show or save?")
msg.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.Save | QtGui.QMessageBox.Cancel)
show = msg.button(QtGui.QMessageBox.Yes)
show.setText('Show image/heatmap')
# show = msg.button(QtGui.QMessageBox.YesToAll)
# show.setText('Show image/heatmap')
save_png = msg.button(QtGui.QMessageBox.Save)
save_png.setText('Save to .rtdc/.png/.jpg...')
cancel = msg.button(QtGui.QMessageBox.Cancel)
cancel.setText('Cancel')
msg.exec_()
#View image and heatmap overlay (Grad-CAM)
if msg.clickedButton()==show: #show image and heatmap overlay
if total_number_of_chosen_cells==0:
return
#Get the images that were passed through the model for prediction
X_valid = self.ValidationSet["X_valid"] #cropped but non-normalized images
ind = np.where( (y_valid==true_label) & (pred==predicted_label) )[0] #select true_label cells and check which of them are classified as predicted_label
X_valid_ = X_valid[ind]
#Popup window to show images and settings
self.popup_gradcam = QtGui.QDialog()
self.popup_gradcam_ui = aid_frontend.popup_cm_interaction()
self.popup_gradcam_ui.setupUi(self.popup_gradcam) #open a popup to show images and options
#self.popup_imgRes.setWindowModality(QtCore.Qt.WindowModal)
#self.popup_gradcam.setWindowModality(QtCore.Qt.ApplicationModal)
#Fill Model info
self.popup_gradcam_ui.lineEdit_loadModel.setText(self.load_model_path)
in_dim = int(self.spinBox_Crop_2.value()) #grab value from Assess Tab
self.popup_gradcam_ui.spinBox_Crop_inpImgSize.setValue(in_dim)#insert value into popup
out_dim = int(self.spinBox_OutClasses_2.value()) #grab value from Assess Tab
self.popup_gradcam_ui.spinBox_outpSize.setValue(out_dim) #insert value into popup
self.popup_gradcam_ui.spinBox_gradCAM_targetClass.setMaximum(out_dim-1)
#For the grad_cam the name of the final conv-layer needs to be selected
convlayers = [layer.name for layer in self.model_keras.layers if len(layer.output_shape)==4]
convlayers = convlayers[::-1] #reverse list
self.popup_gradcam_ui.comboBox_gradCAM_targetLayer.addItems(convlayers)
#Connect buttons to functions
self.popup_gradcam_ui.pushButton_update.clicked.connect(lambda: self.popup_cm_show_update(ToSave,X_valid_))
self.popup_gradcam_ui.pushButton_reset.clicked.connect(self.popup_cm_reset)
self.popup_gradcam_ui.pushButton_showSummary.clicked.connect(self.popup_show_model_summary)
self.popup_gradcam_ui.pushButton_toTensorB.clicked.connect(self.popup_to_tensorboard)
#Get the original image
img_display = np.concatenate(ToSave)
img_display = np.r_[img_display]
img_display = img_display.swapaxes(1,2)
img_display = np.append(img_display,img_display[-1:],axis=0)
self.popup_gradcam_ui.widget_image.setImage(img_display)
self.popup_gradcam.show()
#For .rtdc/.png... saving
elif msg.clickedButton()==save_png: #Save to .rtdc/.png/.jpg/...
if total_number_of_chosen_cells==0:
return
sumlen = np.sum(np.array([len(l) for l in ToSave]))
self.statusbar.showMessage("Nr. of target cells above threshold = "+str(sumlen),2000)
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save to .rtdc/.png/.jpg', Default_dict["Path of last model"],"File format (*.rtdc *.png *.jpg *.bmp *.eps *.gif *.ico *.icns)")
filename = filename[0]
if len(filename)==0:
return
filename_X, file_extension = os.path.splitext(filename)#divide into path and file_extension if possible
#Check if chosen file_extension is valid
if not file_extension in [".rtdc",".png",".jpg",".bmp",".eps",".gif",".ico",".icns"]:
print("Invalid file extension detected. Will use .png instead.")
file_extension = ".png"
if file_extension==".rtdc":#user wants to save to .rtdc
#add the suffix _Valid_Data.rtdc or _Valid_Labels.txt
if not filename.endswith(".rtdc"):
filename = filename +".rtdc"
filename_X = filename.split(".rtdc")[0]+"_Valid_Data.rtdc"
filename_y = filename.split(".rtdc")[0]+"_Valid_Labels.txt"
#Save the labels
y_valid_list = np.concatenate(y_valid_list)
#Save the .rtdc data (images and all other stuff)
#Should cropped or original be saved?
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
elif bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("You may want to choose a different exporting option in ->Options->Export")
msg.setWindowTitle("Export is turned off!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
np.savetxt(filename_y,y_valid_list.astype(int),fmt='%i')
aid_bin.write_rtdc(filename_X,rtdc_path_valid,ToSave,Indices_,cropped=save_cropped,color_mode=self.get_color_mode())
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
else: #some image file format was chosen
#Should cropped or original be saved?
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
elif bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("You may want to choose a different exporting option in ->Options->Export")
msg.setWindowTitle("Export is turned off!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Save the images data to .png/.jpeg...
index = 0
for imgs in ToSave:
for img in imgs:
img = PIL.Image.fromarray(img)
img.save(filename_X+"_"+str(index)+file_extension)
index+=1
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def popup_cm_show_update(self,ToSave,X_valid_):
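"""
Update the image display of the confusion-matrix interaction popup: show
the original images and/or overlay a Grad-CAM heatmap, depending on the
settings chosen in the popup window.
"""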
#ui_item = self.popup_gradcam_ui
#grab information from the popup window
show_image = bool(self.popup_gradcam_ui.groupBox_image_Settings.isChecked())
show_gradCAM = bool(self.popup_gradcam_ui.groupBox_gradCAM_Settings.isChecked())
alpha_1 = float(self.popup_gradcam_ui.doubleSpinBox_image_alpha.value())
alpha_2 = float(self.popup_gradcam_ui.doubleSpinBox_gradCAM_alpha.value())
layer_name = str(self.popup_gradcam_ui.comboBox_gradCAM_targetLayer.currentText()) #self.model_keras exists after assess_model_plotting was carried out
class_ = int(self.popup_gradcam_ui.spinBox_gradCAM_targetClass.value())
colormap = str(self.popup_gradcam_ui.comboBox_gradCAM_colorMap.currentText()) #self.model_keras exists after assess_model_plotting was carried out
colormap = "COLORMAP_"+colormap
colormap = getattr(cv2, colormap)
currentindex = self.popup_gradcam_ui.widget_image.currentIndex
if show_image and not show_gradCAM:
#Get the original image for display
img_display = np.concatenate(ToSave)
img_display = np.r_[img_display]
img_display = img_display.swapaxes(1,2)
img_display = np.append(img_display,img_display[-1:],axis=0)
if show_gradCAM:#grad-Cam is on
img_display = np.concatenate(ToSave)
#compare model input dim and dim of provided data
in_model = self.model_keras.input.shape.as_list()[1:]
in_data = list(X_valid_.shape[1:])
channels_model = in_model[-1]
if not in_data==in_model:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "Model input dimension ("+str(in_model)+") not equal to dim. of input data ("+str(in_data)+")"
msg.setText(text)
msg.setWindowTitle("Input dimension error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
img2 = aid_dl.grad_cam(self.load_model_path, X_valid_, class_, layer_name)#Carry out grad-cam
img2 = [cv2.applyColorMap(cam_, colormap) for cam_ in img2]#create colormap returns BGR image!
img2 = [cv2.cvtColor(cam_, cv2.COLOR_BGR2RGB) for cam_ in img2]#convert to RGB
#in case img_display is grayscale, mimic an rgb image by stacking
if channels_model==1:
print("Triple stacking grayscale channel")
img_display = [np.stack((img_display_,)*3, axis=-1) for img_display_ in img_display]
#add heatmap to image, make sure alpha_1=0 if show_image=False
img_display = [cv2.addWeighted(img_display[i], alpha_1, img2[i], alpha_2, 0) for i in range(X_valid_.shape[0])]
#ToDo: this only works for RGB images. Adjust expression to work for grayscale and RGB
img_display = np.r_[img_display]
img_display = img_display.swapaxes(1,2)
img_display = np.append(img_display,img_display[-1:],axis=0)
self.popup_gradcam_ui.widget_image.setImage(img_display)
self.popup_gradcam_ui.widget_image.setCurrentIndex(currentindex)
self.popup_gradcam.show()
def popup_cm_reset(self):
self.popup_gradcam_ui.groupBox_image_Settings.setChecked(True)
self.popup_gradcam_ui.groupBox_gradCAM_Settings.setChecked(False)
#self.popup_gradcam_ui.doubleSpinBox_image_alpha.setValue(1)
self.popup_gradcam_ui.comboBox_gradCAM_targetLayer.setCurrentIndex(0)
#self.popup_gradcam_ui.comboBox_gradCAM_colorMap.setCurrentIndex(0)
self.popup_gradcam_ui.spinBox_gradCAM_targetClass.setValue(0)
def popup_show_model_summary(self):
#textbrowser popup
self.popup_modelsummary = MyPopup()
self.popup_modelsummary_ui = aid_frontend.popup_cm_modelsummary()
self.popup_modelsummary_ui.setupUi(self.popup_modelsummary) #open a popup to show images and options
text5 = "Model summary:\n"
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text5+summary
self.popup_modelsummary_ui.textBrowser_modelsummary.append(text)
self.popup_modelsummary.show()
def popup_to_tensorboard(self):
#Open the model in tensorboard
#Issue: I cannot stop the process. The approach below, which uses a
#separate thread for the function, does not solve the issue
self.threadpool_single_queue += 1
if self.threadpool_single_queue == 1:
worker = Worker(self.tensorboad_worker)
def get_pid_from_worker(dic):
pid = dic["outp"]
#print("WORKER-PID")
#print("pid")
#os.kill(pid,signal.CTRL_C_EVENT)
#ToDo Find a way to kill that process!
worker.signals.history.connect(get_pid_from_worker)
self.threadpool_single.start(worker)
#print("PID-Here:")
#print(os.getpid())
#time.sleep(2)
def tensorboad_worker(self,progress_callback,history_callback):
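"""
Worker function: write the loaded model graph to a temporary log directory,
launch TensorBoard on localhost and open the URL via the Windows 'start'
command. The process id is emitted via history_callback.
"""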
#send the model to tensorboard (webbased application)
with tf.Session() as sess:
model_keras = load_model(self.load_model_path,custom_objects=aid_dl.get_custom_metrics())
graph = K.get_session().graph # Get the sessions graph
#get a folder for that model in temp
temp_path = aid_bin.create_temp_folder()
modelname = os.path.split(self.load_model_path)[-1]
modelname = modelname.split(".model")[0]
log_dir = os.path.join(temp_path,modelname)
writer = tf.summary.FileWriter(logdir=log_dir, graph=graph)#write a log
#tb = program.TensorBoard()
tb = program.TensorBoard(default.get_plugins(), default.get_assets_zip_provider())
#tb.configure(argv=[None, '--logdir', log_dir,"--host","127.0.0.1"])
tb.configure(argv=[None, '--logdir', log_dir,"--host","localhost"])
url = tb.launch()
url = os.path.join(url)
os.system(r"start "+url)
pid = os.getpid()
dic = {"outp":pid}
#print("WORKER1-PID")
#print(pid)
history_callback.emit(dic) #return the pid (use it to kill the process)
self.threadpool_single_queue = 0 #reset the thread-counter
time.sleep(0.5)
def copy_cm_to_clipboard(self,cm1_or_cm2):
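"""
Copy one of the confusion matrices (cm1_or_cm2==1 or 2) or the
classification report table (cm1_or_cm2==3) to the clipboard as a
pandas DataFrame.
"""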
if cm1_or_cm2==1:
table = self.tableWidget_CM1
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
elif cm1_or_cm2==2:
table = self.tableWidget_CM2
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
elif cm1_or_cm2==3: #this is for the classification report table tableWidget_AccPrecSpec
table = self.tableWidget_AccPrecSpec
cols = table.columnCount()
header = list(range(cols))
rows = table.rowCount()
tmp_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
tmp_df.iloc[i, j] = table.item(i, j).text()
except:
tmp_df.iloc[i, j] = np.nan
tmp_df.to_clipboard()
if cm1_or_cm2<3:
self.statusbar.showMessage("Confusion matrix appended to clipboard.",2000)
if cm1_or_cm2==3:
self.statusbar.showMessage("Classification report appended to clipboard.",2000)
def assess_model_plotting(self):
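"""
Load the model defined on the 'Assess'-tab, predict the validation set and
fill the confusion-matrix tables, the metrics table, the probability
histogram and the third plot.
"""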
if self.load_model_path == None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a model path first")
msg.setWindowTitle("No model path found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#If there is a ValidationSet on RAM-> use it!
if not type(self.ValidationSet) is type(None): #If ValidationSet is not none, there has been ValidationSet loaded already
self.statusbar.showMessage("Use validation data (from RAM) loaded earlier. If that is not good, please check and uncheck a file on 'Build' tab. This will delete the validation data from RAM",5000)
else: #Otherwise get the validation data from the stuff that is clicked on 'Build'-Tab
self.get_validation_data_from_clicked() #after that, self.ValidationSet will exist
#Check if input data is available
if type(self.ValidationSet)==type(None):
return
elif type(self.ValidationSet["X_valid"])==type(None):
return
#Check the input dimensions:
img_dim = self.ValidationSet["X_valid"].shape[-2]
model_in = int(self.spinBox_Crop_2.value())
if model_in!=img_dim:
self.ValidationSet = None
self.get_validation_data_from_clicked() #after that, self.ValidationSet will exist
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("New model has different input dimensions (image crop). Validation set is re-loaded (like when you clicked on files on build-tab)")
msg.setWindowTitle("Automatically re-loaded validation set")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
y_valid = self.ValidationSet["y_valid"] #load the validation labels to a new variable
#X_valid = self.X_valid #<--dont do this since it is used only once (.predict) and it would require additional RAM; instad use self.X_valid for .predict
#Load the model and predict
with tf.Session() as sess:
model_keras = load_model(self.load_model_path,custom_objects=aid_dl.get_custom_metrics())
self.model_keras = model_keras #useful to get the list of layers for Grad-CAM; also used to show the summary
in_dim = model_keras.get_input_shape_at(node_index=0)
if type(in_dim)==list:
multi_input = True
in_dim = in_dim[0]#discard the second (xtra input)
else:
multi_input = False
channels_model = in_dim[-1]
channels_data = self.ValidationSet["X_valid"].shape[-1]
#Compare channel dimensions of loaded model and validation set
if channels_model==3 and channels_data==1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "Model expects 3 channels, but data has 1 channel!"
text = text+" Will stack available channel three times to generate RGB image."
msg.setText(text)
msg.setWindowTitle("Automatic adjustment of image channels")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#model wants rgb images, but provided data is grayscale->copy and stack 3 times
self.ValidationSet["X_valid"] = np.stack((self.ValidationSet["X_valid"][:,:,:,0],)*3, axis=-1)
elif channels_model==1 and channels_data==3:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "Model expects 1 channel, but data has 3 channels!"
text = text+" Will use the luminosity formula to convert RGB to grayscale."
msg.setText(text)
msg.setWindowTitle("Automatic adjustment of image channels")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#model wants grayscale, but provided data is rgb
self.ValidationSet["X_valid"] = aid_img.rgb_2_gray(self.ValidationSet["X_valid"])
elif channels_model!=channels_data: #Model and validation data have different channel dims
text = "Model expects "+str(int(channels_model))+" channel(s), but data has "+str(int(channels_data))+" channel(s)!"
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(text)
msg.setWindowTitle("Model and data channel dimension not equal!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if multi_input == False:
scores = model_keras.predict(self.ValidationSet["X_valid"])
if multi_input == True:
print("self.ValidationSet[Xtra_in]")
print(self.ValidationSet["Xtra_in"])
scores = model_keras.predict([self.ValidationSet["X_valid"],self.ValidationSet["Xtra_in"]])
#Get settings from the GUI
threshold = float(self.doubleSpinBox_sortingThresh.value())#threshold probability above which a cell is sorted
target_index = int(self.spinBox_indexOfInterest.value())#index of the cell type that should be sorted for
thresh_on = bool(self.checkBox_SortingThresh.isChecked())
#Check that the target index alias "Sorting class" is actually a valid class of the model
out_dim = int(self.spinBox_OutClasses_2.value())
if not target_index<out_dim:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("You set the 'Sorting class' to "+str(target_index)+" which is not a valid class of the loaded model. The model only has the following classes: "+str(range(out_dim)))
msg.setWindowTitle("Class not available in the model")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
dic = aid_bin.metrics_using_threshold(scores,y_valid,threshold,target_index,thresh_on) #returns dic = {"scores":scores,"pred":pred,"conc_target_cell":conc_target_cell,"enrichment":enrichment,"yield_":yield_}
self.Metrics = dic #write to a variable #
pred = dic["pred"]
cm = metrics.confusion_matrix(y_valid,pred,labels=range(scores.shape[1]))
cm_normalized = 100*cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
#Show the metrics on tableWidget_CM1 and tableWidget_CM2
#inds_uni = set(list(set(y_valid))+list(set(pred))) #It could be that a cell-index is not present in the validation data, but, the dimension of the scores tells me, how many indices are supposed to appear
inds_uni = range(scores.shape[1]) #these indices are explained by model
#look in into tableWidget_Info_2 if there are user defined index names
rowCount = self.tableWidget_Info_2.rowCount()
#Only counts rows with input
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
try:
indices_on_table = [int(self.tableWidget_Info_2.item(row, 0).text()) for row in range(rowCount)]
names_on_table = [str(self.tableWidget_Info_2.item(row, 3).text()) for row in range(rowCount)]
except Exception as e:
#There was an error!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Check that len(names_on_table) <= len(inds_uni) ->it is impossible that the model for example can predict 2 classes, but there are 3 different classes in the validation set
if not len(names_on_table) <= len(inds_uni):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model can only predict "+str(len(inds_uni))+" classes, but validation data contains "+str(len(names_on_table))+" classes")
msg.setWindowTitle("Too many classes in validation set")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#return
CellNames = []
for ind in inds_uni:
#check if that index is present on table
where = np.where(np.array(indices_on_table)==ind)[0]
if len(where)==1:#if there is exactly one item...
CellNames.append(np.array(names_on_table)[where]) #append the corresponding user defined name to a list
else:
CellNames.append(str(ind))
header_labels = [i[0] for i in CellNames]#list(inds_uni)]
#Table for CM1 - Total Nr of cells
self.tableWidget_CM1.setRowCount(len(inds_uni))
self.tableWidget_CM1.setColumnCount(len(inds_uni))
self.tableWidget_CM1.setHorizontalHeaderLabels(header_labels)
self.tableWidget_CM1.setVerticalHeaderLabels(header_labels)
for i in inds_uni:
for j in inds_uni:
rowPosition = i
colPosition = j
#Total nr of cells for each index
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(cm[i,j]))
self.tableWidget_CM1.setItem(rowPosition, colPosition, item)
self.tableWidget_CM1.resizeColumnsToContents()
self.tableWidget_CM1.resizeRowsToContents()
#Table for CM2 - Normalized Confusion matrix
self.tableWidget_CM2.setRowCount(len(inds_uni))
self.tableWidget_CM2.setColumnCount(len(inds_uni))
self.tableWidget_CM2.setHorizontalHeaderLabels(header_labels)
self.tableWidget_CM2.setVerticalHeaderLabels(header_labels)
for i in range(len(inds_uni)):
for j in range(len(inds_uni)):
rowPosition = i
colPosition = j
#Total nr of cells for each index
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(cm_normalized[i,j]))
self.tableWidget_CM2.setItem(rowPosition, colPosition, item)
self.tableWidget_CM2.resizeColumnsToContents()
self.tableWidget_CM2.resizeRowsToContents()
############Fill tableWidget_AccPrecSpec with information##########
#Compute more metrics and put them on the table below
nr_target_init = float(len(np.where(y_valid==target_index)[0])) #number of target cells in the initial sample
conc_init = nr_target_init/float(len(y_valid)) #concentration of the target cells in the initial sample
acc = metrics.accuracy_score(y_valid,pred)
#Reset the table
self.tableWidget_AccPrecSpec.setColumnCount(0)#Reset table
self.tableWidget_AccPrecSpec.setRowCount(0)#Reset table
nr_cols = np.max([5,len(inds_uni)+1])
self.tableWidget_AccPrecSpec.setColumnCount(nr_cols) #at least five columns
self.tableWidget_AccPrecSpec.setRowCount(7+len(inds_uni)+2) #Nr. of rows
#Put lots and lots of Info on tableWidget_AccPrecSpec
text_conc_init = "Init. conc. of cells from class/name "+header_labels[target_index]
self.tableWidget_AccPrecSpec.setItem(0 , 0, QtGui.QTableWidgetItem(text_conc_init))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(np.round(100*conc_init,4)))
self.tableWidget_AccPrecSpec.setItem(0, 1, item)
text_conc_final = "Final conc. in target region"
self.tableWidget_AccPrecSpec.setItem(1 , 0, QtGui.QTableWidgetItem(text_conc_final))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(np.round(dic["conc_target_cell"],4)))
self.tableWidget_AccPrecSpec.setItem(1, 1, item)
text_enrich = "Enrichment"
self.tableWidget_AccPrecSpec.setItem(2 , 0, QtGui.QTableWidgetItem(text_enrich))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(np.round(dic["enrichment"],4)))
self.tableWidget_AccPrecSpec.setItem(2, 1, item)
text_yield = "Yield"
self.tableWidget_AccPrecSpec.setItem(3 , 0, QtGui.QTableWidgetItem(text_yield))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(np.round(dic["yield_"],4)))
self.tableWidget_AccPrecSpec.setItem(3, 1, item)
text_acc = "Accuracy"#+str(round(acc,4))+"\n"
self.tableWidget_AccPrecSpec.setItem(4 , 0, QtGui.QTableWidgetItem(text_acc))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(np.round(acc,4)))
self.tableWidget_AccPrecSpec.setItem(4, 1, item)
text_classification_report = "Classification Report"#+metrics.classification_report(y_valid, pred, target_names=header_labels)
self.tableWidget_AccPrecSpec.setItem(5 , 0, QtGui.QTableWidgetItem(text_classification_report))
class_rep = metrics.classification_report(y_valid, pred,labels=inds_uni, target_names=header_labels,output_dict =True)
try:
df = pd.DataFrame(class_rep)
df = df.T
ax_left = df.axes[0]
for row in range(len(ax_left)):
self.tableWidget_AccPrecSpec.setItem(7+row, 0, QtGui.QTableWidgetItem(str(ax_left[row])))
ax_up = df.axes[1]
for col in range(len(ax_up)):
self.tableWidget_AccPrecSpec.setItem(6, 1+col, QtGui.QTableWidgetItem(str(ax_up[col])))
for row in range(df.shape[0]):
for col in range(df.shape[1]):
val = df.iloc[row,col]
val = float(np.round(val,4))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, val)
self.tableWidget_AccPrecSpec.setItem(7+row, 1+col, item)
except Exception as e:
#There is an issue loading the files!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
self.tableWidget_AccPrecSpec.resizeColumnsToContents()
self.tableWidget_AccPrecSpec.resizeRowsToContents()
#AFTER the table is resized to the contents, fill in also information
#about the used data:
rowPosition = self.tableWidget_AccPrecSpec.rowCount()
self.tableWidget_AccPrecSpec.insertRow(rowPosition) #Insert a new row
self.tableWidget_AccPrecSpec.setItem(rowPosition , 0, QtGui.QTableWidgetItem("Used Files"))
rowPosition = self.tableWidget_AccPrecSpec.rowCount()
self.tableWidget_AccPrecSpec.insertRow(rowPosition) #Insert another row!
self.tableWidget_AccPrecSpec.setItem(rowPosition , 0, QtGui.QTableWidgetItem("File"))
#dic = {"SelectedFiles_valid":SelectedFiles_valid,"nr_events_epoch_valid":nr_events_epoch_valid,"rtdc_path_valid":[rtdc_path],"X_valid_orig":[X_valid_orig],"X_valid":X_valid,"y_valid":y_valid,"Indices":[Indices]}
rtdc_path_valid = self.ValidationSet["rtdc_path_valid"]
#nr_events_epoch_valid = self.ValidationSet["nr_events_epoch_valid"]
y_valid = self.ValidationSet["y_valid"] #y_valid is a long array containing the label of all cell (of all clicked files)
Indices = self.ValidationSet["Indices"] #Index is a list with arrays containing cell-indices (to track events in a data-set)
y_valid_uni = np.unique(np.array(y_valid),return_counts=True)
#set the column count to at least match the number of different cell-types available
if self.tableWidget_AccPrecSpec.columnCount() < len(y_valid_uni[0]):
diff = len(y_valid_uni[0])-self.tableWidget_AccPrecSpec.columnCount()
for col_ind in range(diff):
colPosition = self.tableWidget_AccPrecSpec.columnCount()
self.tableWidget_AccPrecSpec.insertColumn(colPosition) #Insert a new col for each cell-type
#Create a column for each cell-type
for col_ind in range(len(y_valid_uni[0])):
#how_many = y_valid_uni[1][col_ind]
#self.tableWidget_AccPrecSpec.setItem(rowPosition , 1+col_ind, QtGui.QTableWidgetItem(float(how_many)))
content = "Class "+str(y_valid_uni[0][col_ind])
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, content)
self.tableWidget_AccPrecSpec.setItem(rowPosition , 1+col_ind, item)
loc = 0
for row in range(len(rtdc_path_valid)):
rowPosition = self.tableWidget_AccPrecSpec.rowCount()
self.tableWidget_AccPrecSpec.insertRow(rowPosition) #Insert a new row for each entry
self.tableWidget_AccPrecSpec.setItem(rowPosition , 0, QtGui.QTableWidgetItem(rtdc_path_valid[row]))
#y_valid_uni = np.unique(y_valid[row])
#self.tableWidget_AccPrecSpec.setItem(rowPosition , 1, QtGui.QTableWidgetItem(np.array(y_valid_uni)))
#self.tableWidget_AccPrecSpec.setItem(rowPosition , 2, QtGui.QTableWidgetItem(float(nr_events_epoch_valid[row])))
index = Indices[row] #get the array of indices of a single measurement
y_valid_i = y_valid[loc:loc+len(index)]
loc = loc+len(index)
y_valid_i_uni = np.unique(y_valid_i,return_counts=True)
for col_ind in range(len(y_valid_i_uni[0])):
#what is the cell-type
cell_type = int(y_valid_i_uni[0][col_ind])#cell-type index always starts with 0; the count for cell-type 0 goes to column 1
how_many = y_valid_i_uni[1][col_ind]
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, int(how_many))
self.tableWidget_AccPrecSpec.setItem(rowPosition , 1+cell_type, item)
#Draw the probability histogram
self.probability_histogram()
#Finally, also update the third plot
self.thirdplot()
def create_random_table(self):
print("def create_random_table only useful for development")
# matrix = np.random.randint(0,100,size=(3,3))
# self.tableWidget_CM1.setRowCount(matrix.shape[0])
# self.tableWidget_CM1.setColumnCount(matrix.shape[1])
#
# for i in range(matrix.shape[0]):
# for j in range(matrix.shape[1]):
# item = QtWidgets.QTableWidgetItem()
# item.setData(QtCore.Qt.EditRole,str(matrix[i,j]))
# self.tableWidget_CM1.setItem(i, j, item)
#
# self.tableWidget_CM1.resizeColumnsToContents()
# self.tableWidget_CM1.resizeRowsToContents()
def probability_histogram(self):
"""
Grab the scores of each class and show it in histogram
"""
if len(self.Metrics) ==0: #but if not give message and return
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There are no Metrics determined yet (use ->'Update Plots' first)")
msg.setWindowTitle("No Metrics found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
dic = self.Metrics #returns dic = {"scores":scores,"pred":pred,"conc_target_cell":conc_target_cell,"enrichment":enrichment,"yield_":yield_}
scores = dic["scores"]
#Get the available cell indices (cell-type identifier)
inds_uni = range(scores.shape[1]) #these indices are explained by model
threshold = float(self.doubleSpinBox_sortingThresh.value())#threshold probability above which a cell is sorted
target_index = int(self.spinBox_indexOfInterest.value())#index of the cell type that should be sorted for
try:
#What is the probability of cell with y_valid=i that it belongs to class target_index?
scores_i = []
y_valid = self.ValidationSet["y_valid"]
for i in inds_uni:
ind = np.where(y_valid==i)[0]
if len(ind)>0: #if there are no cells available, dont append. In this case there will also be no color defined
scores_i.append(scores[ind,target_index])
except Exception as e:
#There is an issue loading the files!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
rowCount = self.tableWidget_Info_2.rowCount()
#only count rows with content
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
names_on_table = [str(self.tableWidget_Info_2.item(row, 3).text()) for row in range(rowCount)]
index_on_table = [int(self.tableWidget_Info_2.item(row, 0).text()) for row in range(rowCount)]
#On which row is the target_index?
ind = np.where(np.array(index_on_table)==target_index)[0]
if len(ind) == 1:
target_name = str(np.array(names_on_table)[ind][0])
else:
target_name = str(target_index)
#Get the user-defined colors from table
colors_on_table = [self.tableWidget_Info_2.item(row, 2).background() for row in range(rowCount)]
#it can be that the table was not updated and there are more scores than table-items
if len(colors_on_table)!=len(scores_i):
#update table
SelectedFiles = self.items_clicked_no_rtdc_ds()
self.update_data_overview(SelectedFiles)
self.update_data_overview_2(SelectedFiles)
#update colors on table
rowCount = self.tableWidget_Info_2.rowCount()
#only count rows with content
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
colors_on_table = [self.tableWidget_Info_2.item(row, 2).background() for row in range(rowCount)]
#Clear the plot
self.widget_probHistPlot.clear()
#Add plot
hist = self.widget_probHistPlot.addPlot()
hist.showGrid(x=True,y=True)
hist.setLabel('bottom', "p("+target_name+")", units='')
hist.setLabel('left', "#", units='')
#Get the user defined histogram style from the combobox
style = str(self.comboBox_probability_histogram.currentText())
for i in range(len(scores_i)): #use len(scores_i) rather than len(inds_uni); otherwise there is an error if a cell-type is missing in the validation set
hist_i = hist.plot()
if len(scores_i[i])>1:#only continue if there are multiple events (histogram does not make sense otherwise)
range_hist = (scores_i[i].min(), scores_i[i].max())
first_edge, last_edge = np.lib.histograms._get_outer_edges(scores_i[i], range=range_hist)
try: #numpy 1.15
width = np.lib.histograms._hist_bin_selectors['auto'](scores_i[i])
except:#numpy >1.15
width = np.lib.histograms._hist_bin_selectors['auto'](scores_i[i],(np.min(scores_i[i]),np.max(scores_i[i])))
try:#prevent crash if width=0
n_equal_bins = int(np.ceil(np.lib.histograms._unsigned_subtract(last_edge, first_edge) / width))
except:
n_equal_bins = 1
if n_equal_bins>1E4: #Who needs more than 10k bins?!:
n_equal_bins = int(1E4)
else:
n_equal_bins='auto'
y,x = np.histogram(scores_i[i], bins=n_equal_bins)
if style=="Style1":
pencolor = pg.mkColor(colors_on_table[i].color())
pen = pg.mkPen(color=pencolor,width=5)
hist_i.setData(x, y, stepMode=True, pen=pen,clear=False)
elif style=="Style2":
pencolor = pg.mkColor(colors_on_table[i].color())
pen = pg.mkPen(color=pencolor,width=10)
hist_i.setData(x, y, stepMode=True, pen=pen,clear=False)
elif style=="Style3":
color = colors_on_table[i].color()
color.setAlpha(0.6*255.0)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
hist_i.setData(x, y, stepMode=True, fillLevel=0, brush=brush,clear=False)
elif style=="Style4":
color = colors_on_table[i].color()
color.setAlpha(0.7*255.0)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
hist_i.setData(x, y, stepMode=True, fillLevel=0, brush=brush,clear=False)
elif style=="Style5":
color = colors_on_table[i].color()
color.setAlpha(0.8*255.0)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
hist_i.setData(x, y, stepMode=True, fillLevel=0, brush=brush,clear=False)
#Add a vertical line indicating the threshold
self.line = pg.InfiniteLine(pos=threshold, angle=90, pen='w', movable=False)
hist.addItem(self.line)
hist.setXRange(0, 1, padding=0)
def thirdplot(self):
target_index =self.spinBox_indexOfInterest.value()
cb_text = self.comboBox_3rdPlot.currentText()
if cb_text=='None':
return
if cb_text=='ROC-AUC':
#Check if self.Metrics are available
if len(self.Metrics) == 0:
self.assess_model_plotting() #run this function to create self.Metrics
dic = self.Metrics
else: #Metrics are already available; use them
dic = self.Metrics
if len(dic)==0:
return
#Get the ValidationSet
y_valid = self.ValidationSet["y_valid"] #load the validation labels to a new variable
scores = dic["scores"]
inds_uni = list(range(scores.shape[1])) #these indices are explained by model
#ROC curves are defined for binary problems, so evaluate one-vs-rest with one-hot labels:
Y_valid = np_utils.to_categorical(y_valid,num_classes=len(inds_uni))
# Compute ROC curve and ROC area for each class
fpr,tpr,roc_auc = dict(),dict(),dict()
for i in range(len(inds_uni)):
fpr[i], tpr[i], _ = metrics.roc_curve(Y_valid[:, i], scores[:, i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = metrics.roc_curve(Y_valid.ravel(), scores.ravel())
roc_auc["micro"] = metrics.auc(fpr["micro"], tpr["micro"])
#Get the user-defined colors from table
rowCount = self.tableWidget_Info_2.rowCount()
#only count rows with content
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
colors_on_table = [self.tableWidget_Info_2.item(row, 2).background() for row in range(rowCount)]
#Clear the plot
self.widget_3rdPlot.clear()
#Add plot
hist = self.widget_3rdPlot.addPlot()
hist.showGrid(x=True,y=True)
hist.addLegend()
hist.setLabel('bottom', "False Positive Rate", units='')
hist.setLabel('left', "True Positive Rate", units='')
for i, color in zip(range(len(inds_uni)), colors_on_table):
text = 'Class '+str(i)+', AUC='+str(round(roc_auc[i],2))
hist.plot(fpr[i], tpr[i], pen=None,symbol='o',symbolPen=None,symbolBrush=color,name=text,clear=False)
clr = color.color()
hist.plot(fpr[i],tpr[i],pen=clr)
hist.setXRange(0, 1, padding=0)
if cb_text=='Precision-Recall':
#Check if self.Metrics are available
if len(self.Metrics) == 0:
self.assess_model_plotting() #run this function to create self.Metrics
dic = self.Metrics
else: #Otherwise, there are Metrics available already :) Use them
dic = self.Metrics
if len(dic)==0:
return
#Get the ValidationSet
y_valid = self.ValidationSet["y_valid"] #load the validation labels to a new variable
scores = dic["scores"]#[:,target_index]
inds_uni = list(range(scores.shape[1])) #these indices are explained by model
#Precision-Recall curves are defined for binary problems, so evaluate one-vs-rest with one-hot labels:
Y_valid = np_utils.to_categorical(y_valid,num_classes=len(inds_uni))
# Compute Precision Recall curve and P-R area for each class
precision,recall,precision_recall_auc = dict(),dict(),dict()
for i in range(len(inds_uni)):
precision[i], recall[i], _ = metrics.precision_recall_curve(Y_valid[:, i], scores[:, i])
precision_recall_auc[i] = metrics.auc(recall[i], precision[i])
# Compute micro-average Precision-Recall curve and P-R area
precision["micro"], recall["micro"], _ = metrics.precision_recall_curve(Y_valid.ravel(), scores.ravel())
precision_recall_auc["micro"] = metrics.auc(recall["micro"],precision["micro"])
#Get the user-defined colors from table
rowCount = self.tableWidget_Info_2.rowCount()
#only count rows with content
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
colors_on_table = [self.tableWidget_Info_2.item(row, 2).background() for row in range(rowCount)]
#Clear the plot
self.widget_3rdPlot.clear()
#Add plot
hist = self.widget_3rdPlot.addPlot()
hist.showGrid(x=True,y=True)
hist.addLegend()
hist.setLabel('bottom', "Recall", units='')
hist.setLabel('left', "Precision", units='')
for i, color in zip(range(len(inds_uni)), colors_on_table):
text = 'Class '+str(i)+', AUC='+str(round(precision_recall_auc[i],2))
hist.plot(recall[i],precision[i], pen=None,symbol='o',symbolPen=None,symbolBrush=color,name=text,clear=False)
clr = color.color()
hist.plot(recall[i],precision[i],pen=clr)
hist.setXRange(0, 1, padding=0)
if cb_text=='Enrichment vs. Threshold' or cb_text=='Yield vs. Threshold' or cb_text=='Conc. vs. Threshold':
#Check if self.Metrics are available
if len(self.Metrics) == 0: #if not,
self.assess_model_plotting() #run this function to create self.Metrics
dic = self.Metrics
else: #If Metrics are already available, use it. Load it
dic = self.Metrics
if len(dic)==0:
return
scores = dic["scores"]
y_valid = self.ValidationSet["y_valid"] #load the validation labels to a new variable
#The dic only contains metrics for a single threshold, which is not enough
#call aid_bin.metrics_using_threshold with a range of thresholds:
#(it might make sense to evaluate this for each possible target_index. For now, only perform the measurement for the user-defined target index)
Dics,Threshs = [],[]
for thresh in np.linspace(0,1,25):
dic_ = aid_bin.metrics_using_threshold(scores,y_valid,thresh,target_index) #returns dic = {"scores":scores,"pred":pred,"conc_target_cell":conc_target_cell,"enrichment":enrichment,"yield_":yield_}
Dics.append(dic_)
Threshs.append(thresh)
#Collect information in arrays
enrichment_ = np.array([d["enrichment"] for d in Dics])
yield__ = np.array([d["yield_"] for d in Dics])
conc_target_cell = np.array([d["conc_target_cell"] for d in Dics])
Threshs = np.array(Threshs)
rowCount = self.tableWidget_Info_2.rowCount()
#only count rows with content
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
colors_on_table = [self.tableWidget_Info_2.item(row, 2).background() for row in range(rowCount)]
#Clear the plot
self.widget_3rdPlot.clear()
#Add plot
hist = self.widget_3rdPlot.addPlot()
hist.showGrid(x=True,y=True)
hist.addLegend()
hist.setLabel('bottom', "Threshold", units='')
color = '#0000ff'
if cb_text=='Enrichment vs. Threshold':
hist.setLabel('left', "Enrichment", units='')
hist.plot(Threshs,enrichment_, pen=None,symbol='o',symbolPen=None,symbolBrush=color,name='',clear=False)
hist.plot(Threshs,enrichment_,pen=color)
if cb_text=='Yield vs. Threshold':
hist.setLabel('left', "Yield", units='')
hist.plot(Threshs,yield__, pen=None,symbol='o',symbolPen=None,symbolBrush=color,name='',clear=False)
hist.plot(Threshs,yield__,pen=color)
if cb_text=='Conc. vs. Threshold':
hist.setLabel('left', "Conc. of target cell in target region", units='')
hist.plot(Threshs,conc_target_cell, pen=None,symbol='o',symbolPen=None,symbolBrush=color,name='',clear=False)
hist.plot(Threshs,conc_target_cell,pen=color)
hist.setXRange(0, 1, padding=0)
#Add indicator for the currently used threshold
threshold = float(self.doubleSpinBox_sortingThresh.value())#threshold probability above which a cell is sorted
self.line = pg.InfiniteLine(pos=threshold, angle=90, pen='w', movable=False)
hist.addItem(self.line)
def classify(self):
#Very similar function to "Update Plot". But here, no graphs are produced
#Resulting scores/predictions etc are simply stored to excel file
#This function does NOT take labels.
#Check if a model was defined
if self.load_model_path == None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a model path first")
msg.setWindowTitle("No model path found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
AvailableFiles = self.items_available()
rtdc_paths = [file_["rtdc_path"] for file_ in AvailableFiles]
#Classify all datasets or just one?
Files,FileIndex = [],[]
if self.radioButton_selectAll.isChecked():
Files = rtdc_paths
FileIndex = list(range(len(Files)))
if len(Files)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There are no files on the 'Build'-Tab")
msg.setWindowTitle("No files found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
elif self.radioButton_selectDataSet.isChecked():
rtdc_path = self.comboBox_selectData.currentText()
Files.append(rtdc_path)
#get the index of this file on the table
FileIndex = [int(self.comboBox_selectData.currentIndex())]
#FileIndex = list(np.where(np.array(rtdc_path)==np.array(rtdc_paths))[0])
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please use the radiobuttons (left) to indicate if all or only a selected file should be classified.")
msg.setWindowTitle("No file(s) specified")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
print("Chosen file(s):")
print(Files)
#what input size is required by loaded model?
crop = int(self.spinBox_Crop_2.value())
norm = str(self.comboBox_Normalization_2.currentText())
paddingMode = str(self.comboBox_paddingMode.currentText())
color_mode = self.get_color_mode()
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in AvailableFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
xtra_in = set([selectedfile["xtra_in"] for selectedfile in AvailableFiles])
if len(xtra_in)>1:# both False and True are present; mixed usage is not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#if normalization method needs mean/std of training set, the metafile needs to be loaded:
if norm == "StdScaling using mean and std of all training data":
modelindex = int(self.spinBox_ModelIndex_2.value())
path,fname = os.path.split(self.load_model_path)
fname = fname.split(str(modelindex)+".model")[0]+"meta.xlsx"
metafile_path = os.path.join(path,fname)
parameters = pd.read_excel(metafile_path,sheet_name='Parameters')
mean_trainingdata = parameters["Mean of training data used for scaling"]
std_trainingdata = parameters["Std of training data used for scaling"]
else:
mean_trainingdata = None
std_trainingdata = None
#Create config (define which device to use)
if self.radioButton_cpu.isChecked():
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
model_keras = load_model(self.load_model_path,custom_objects=aid_dl.get_custom_metrics())
in_dim = model_keras.get_input_shape_at(node_index=0)
#Get the color mode of the model
channels_model = in_dim[-1]
if channels_model==1:
color_mode='Grayscale'
elif channels_model==3:
color_mode='RGB'
else:
print("Invalid number of channels. AID only supports grayscale (1 channel) and RGB (3 channels) images.")
#Get the user-set export option (Excel or to 'userdef0' in .rtdc file)
export_option = str(self.comboBox_scoresOrPrediction.currentText())
if export_option == "Add predictions to .rtdc file (userdef0)" or export_option=="Add pred&scores to .rtdc file (userdef0 to 9)":
#Users sometimes need to have Donor-ID (Parent foldername) added to the .rtdc file
#Ask the user: Do you want to get a specific fixed addon to filename, OR do you want to have the parent-foldername added?
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "Do you want to get a specific fixed addon to filename, <b>or do you want to have the parent-foldername added for each file individually?"
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Filename-addon for created files")
msg.addButton(QtWidgets.QPushButton('Specific fixed addon...'), QtWidgets.QMessageBox.YesRole)
msg.addButton(QtWidgets.QPushButton('Parent foldername'), QtWidgets.QMessageBox.NoRole)
msg.addButton(QtWidgets.QPushButton('Cancel'), QtWidgets.QMessageBox.RejectRole)
retval = msg.exec_()
if retval==0:
#Get some user input:
fname_addon, ok = QtWidgets.QInputDialog.getText(self, 'Specific fixed addon...', 'Enter filename addon:')
if ok:
fname_addon = str(fname_addon)
else:
return
elif retval==1:
fname_addon = "Action:GetParentFolderName!"
else:
return
#Iterate over all Files
for iterable in range(len(Files)):#rtdc_path in Files:
print("Files:"+str(Files))
print("iterable:"+str(iterable))
rtdc_path = Files[iterable]
print("rtdc_path:"+str(rtdc_path))
print("FileIndex:"+str(FileIndex))
print("zoom_factors:"+str(zoom_factors))
f_index = FileIndex[iterable]
zoom_factor = zoom_factors[f_index]
#get all images, cropped correctly
gen_train = aid_img.gen_crop_img(crop,rtdc_path,replace=True,random_images=False,zoom_factor=zoom_factor,zoom_order=zoom_order,color_mode=color_mode,padding_mode=paddingMode,xtra_in=xtra_in)
x_train,index,xtra_train = next(gen_train) #x_train-images of all cells, index-original index of all cells
if norm == "StdScaling using mean and std of all training data":
x_train = aid_img.image_normalization(x_train,norm,mean_trainingdata,std_trainingdata)
else:
x_train = aid_img.image_normalization(x_train,norm)
#Check the input dimensions:
img_dim = x_train.shape[-2]
model_in = int(self.spinBox_Crop_2.value())
if model_in!=img_dim:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("New model has different input dimensions (image crop). Validation set is re-loaded (clicked files on build-tab)")
msg.setWindowTitle("Input dimensions not fitting")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
scores = model_keras.predict(x_train)
scores_normal = np.copy(scores)
pred_normal = np.argmax(scores_normal,axis=1)
#Get settings from the GUI
threshold = float(self.doubleSpinBox_sortingThresh.value())#threshold probability above which a cell is sorted
target_index = int(self.spinBox_indexOfInterest.value())#index of the cell type that should be sorted for
#Use argmax for prediction (threshold can only be applied to one index)
pred_normal = np.argmax(scores,axis=1)
#First: check the scores_in_function of the sorting index and adjust them using the threshold
pred_thresh = np.array([1 if p>threshold else 0 for p in scores[:,target_index]])
#replace the corresponding column in the scores_in_function
scores[:,target_index] = pred_thresh
#Determine the prediction again, considering the threshold for the target index
pred_thresh = np.argmax(scores,axis=1)
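#Illustrative example (not taken from real data): for a cell with scores=[0.30, 0.45, 0.25]
#and target_index=1, a threshold of 0.5 sets column 1 to 0 (0.45<=0.5), so the thresholded
#argmax falls back to class 0; with threshold=0.4 column 1 becomes 1 and the prediction
#stays class 1. The plain argmax (pred_normal) would always return class 1 here.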
normal_or_thresh = bool(self.checkBox_SortingThresh.isChecked())
if normal_or_thresh==True: #checkbox checked: use the plain argmax prediction (equivalent to the default 0.5 threshold)
prediction_to_rtdc_ds = pred_normal
if normal_or_thresh==False: #checkbox unchecked: use the prediction thresholded for the target class
prediction_to_rtdc_ds = pred_thresh
if export_option == "Scores and predictions to Excel sheet":
info = np.array([[self.load_model_path],[rtdc_path],[target_index],[threshold]]).T
info = pd.DataFrame(info,columns=["load_model_path","rtdc_path","target_class","threshold"])
#Combine all information in nice excel sheet
filename = rtdc_path.split(".rtdc")[0]+"_Prediction.xlsx"
writer = pd.ExcelWriter(filename, engine='openpyxl')
#Used files go to a separate sheet on the -session.xlsx
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import logging
import traceback
import warnings
import pandas as pd
import numpy as np
from pandas.core.common import SettingWithCopyWarning
from common.base_parser import BaseParser
from common.constants import *
from common.database import get_database
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s', handlers=[logging.StreamHandler()])
NODE_ASSOCIATION = 'Association'
ZENODO_CHEMICAL2DISEASE_FILE = 'Chemical2Disease_assoc_theme.tsv'
ZENODO_CHEMICAL2GENE_FILE = 'Chemical2Gene_assoc_theme.tsv'
ZENODO_GENE2DISEASE_FILE = 'Gene2Disease_assoc_theme.tsv'
ZENODO_GENE2GENE_FILE = 'Gene2Gene_assoc_theme.tsv'
headers = ['pmid', 'sentence_num', 'entry1_formatted', 'entry1Loc', 'entry2_formatted', 'entry2Loc',
'entry1_name', 'entry2_name', 'entry1_id', 'entry2_id', 'entry1_type', 'entry2_type', 'path', 'sentence']
dependency_headers = ['snippet_id', 'entry1_id', 'entry2_id', 'entry1_name', 'entry2_name', 'path']
columns = ['pmid', 'sentence_num', 'entry1_name', 'entry2_name', 'entry1_id', 'entry2_id', 'path', 'sentence']
theme_map = {
'A+': 'agonism, activation',
'A-': 'antagonism, blocking',
'B': 'binding, ligand (esp. receptors)',
'E+': 'increases expression/production',
'E-': 'decreases expression/production',
'E': 'affects expression/production (neutral)',
'N': 'inhibits',
'O': 'transport, channels',
'K': 'metabolism, pharmacokinetics',
'Z': 'enzyme activity',
'T': 'treatment/therapy (including investigatory)',
'C': 'inhibits cell growth (esp. cancers)',
'Sa': 'side effect/adverse event',
'Pr': 'prevents, suppresses',
'Pa': 'alleviates, reduces',
'J': 'role in disease pathogenesis',
'Mp': 'biomarkers (of disease progression)',
'U': 'causal mutations',
'Ud': 'mutations affecting disease course',
'D': 'drug targets',
'Te': 'possible therapeutic effect',
'Y': 'polymorphisms alter risk',
'G': 'promotes progression',
'Md': 'biomarkers (diagnostic)',
'X': 'overexpression in disease',
'L': 'improper regulation linked to disease',
'W': 'enhances response',
'V+': 'activates, stimulates',
'I': 'signaling pathway',
'H': 'same protein or complex',
'Rg': 'regulation',
'Q': 'production by cell population',
}
class LiteratureDataParser(BaseParser):
def __init__(self, prefix: str):
BaseParser.__init__(self, prefix, 'literature')
self.parsed_dir = os.path.join(self.output_dir, 'parsed')
os.makedirs(self.output_dir, 0o777, True)
os.makedirs(self.parsed_dir, 0o777, True)
self.literature_chemicals = set()
self.literature_genes = set()
self.literature_diseases = set()
def get_datafile_name(self, entry1_type, entry2_type, with_theme=False):
if with_theme:
return os.path.join(
self.download_dir, f'part-ii-dependency-paths-{entry1_type.lower()}-{entry2_type.lower()}-sorted-with-themes.txt.gz')
return os.path.join(
self.download_dir, f'part-ii-dependency-paths-{entry1_type.lower()}-{entry2_type.lower()}-sorted.txt.gz')
def get_path2theme_datafile_name(self, entry1_type, entry2_type):
return os.path.join(
self.download_dir, f'part-i-{entry1_type.lower()}-{entry2_type.lower()}-path-theme-distributions.txt.gz')
def parse_dependency_file(self, entry1_type, entry2_type, snippet_file, with_theme=True):
"""
Clean the raw dependency file and write the cleaned output into the 'parsed' folder. Update the entity sets,
and write data to the following files in the parsed folder
- snippet.tsv
- {entry1_type}2{entry2_type}_assoc.tsv: with snippet_id, entry1, entry2, path columns
The files need to be further validated and cleaned by removing duplicates and un-matched genes, chemicals and diseases
:param entry1_type:
:param entry2_type:
:param snippet_file: open output file for snippets, or None to skip writing snippets
:param with_theme: whether to read the "-with-themes" dependency file
:return:
"""
file = self.get_datafile_name(entry1_type, entry2_type, with_theme)
if with_theme:
outfile = open(os.path.join(self.parsed_dir, f'{entry1_type}2{entry2_type}_assoc_theme.tsv'), 'w')
else:
outfile = open(os.path.join(self.parsed_dir, f'{entry1_type}2{entry2_type}_assoc.tsv'), 'w')
f = lambda x: str(x)
converters = {'pmid': f, 'sentence_num': f, 'entry1_id': f, 'entry2_id': f}
logging.info('processing ' + file)
count = 0
filerow_count = 0
data_chunk = pd.read_csv(
file, sep='\t',
names=headers,
usecols=columns,
converters=converters,
chunksize=10000,
index_col=False
)
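# With chunksize=10000, read_csv returns an iterator of DataFrames instead of one frame,
# so each 10k-row chunk below is cleaned and appended to the output file without loading
# the full dependency file into memory at once.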
for i, chunk in enumerate(data_chunk):
filerow_count += len(chunk)
if i % 10 == 0:
print(i)
try:
df = chunk.replace({'null': np.nan, '-': np.nan})
df.dropna(inplace=True)
df.drop_duplicates(inplace=True)
if entry1_type == entry2_type:
df = df[(df['entry1_name'] != df['entry2_name']) & (df['entry1_id'] != df['entry2_id'])]
if len(df) == 0:
continue
# clean gene ids
if entry1_type == NODE_GENE:
df = self.clean_gene_column(df, 'entry1_id')
df['entry1_id'] = df['entry1_id'].apply(f)
if entry2_type == NODE_GENE:
df = self.clean_gene_column(df, 'entry2_id')
df['entry2_id'] = df['entry2_id'].apply(f)
if entry1_type == NODE_GENE:
df = df[df['entry1_id'] != df['entry2_id']]
if entry2_type == NODE_DISEASE:
df = df[~df['entry2_id'].str.startswith('OMIM')]
df['entry2_id'] = df['entry2_id'].apply(
lambda x: x if str(x).startswith('MESH') else 'MESH:' + str(x))
if entry1_type == NODE_CHEMICAL:
df['entry1_id'] = df['entry1_id'].apply(
lambda x: x if str(x).startswith('CHEBI') or str(x).startswith('MESH') else 'MESH:' + str(x)
)
df['snippet_id'] = df.apply(lambda row: str(row['pmid']) + '-' + str(row['sentence_num']), axis=1)
df_assoc = df[dependency_headers]
df_assoc.to_csv(outfile, index=False, mode='a', sep='\t')
if snippet_file:
df_snippet = df[['snippet_id', 'pmid', 'sentence']].copy()
df_snippet.drop_duplicates(inplace=True)
df_snippet.to_csv(snippet_file, index=False, mode='a', sep='\t')
# update literature genes, diseases
if entry1_type == NODE_GENE:
self.literature_genes.update(df['entry1_id'].tolist())
if entry1_type == NODE_CHEMICAL:
self.literature_chemicals.update(df['entry1_id'].tolist())
if entry2_type == NODE_GENE:
self.literature_genes.update(df['entry2_id'].tolist())
if entry2_type == NODE_DISEASE:
self.literature_diseases.update(df['entry2_id'].tolist())
count = count + len(df)
except Exception as ex:
traceback.print_exc()
print(f'Errored out at index {i}')
break
logging.info('file rows processed: ' + str(filerow_count) + ', cleaned file row:' + str(count))
outfile.close()
def parse_dependency_files(self):
"""
Process all dependency files (with theme) and write them into the parsed folder.
part-ii-dependency-paths-chemical-disease-sorted.txt.gz
file rows processed: 15645444, cleaned file row:12881577
part-ii-dependency-paths-chemical-gene-sorted.txt.gz
file rows processed: 9525647, cleaned file row:7958425
part-ii-dependency-paths-gene-disease-sorted.txt.gz
file rows processed: 12792758, cleaned file row:12808885
part-ii-dependency-paths-gene-gene-sorted.txt.gz
file rows processed: 34089578, cleaned file row:25333884
literature genes:150380
literature diseases:8586
literature chemicals:66178
:return:
"""
snippet_file = open(os.path.join(self.parsed_dir, self.file_prefix + 'snippet.tsv'), 'w')
self.parse_dependency_file(NODE_CHEMICAL, NODE_DISEASE, snippet_file, True)
self.parse_dependency_file(NODE_CHEMICAL, NODE_GENE, snippet_file, True)
self.parse_dependency_file(NODE_GENE, NODE_DISEASE, snippet_file, True)
self.parse_dependency_file(NODE_GENE, NODE_GENE, snippet_file, True)
snippet_file.close()
self.parse_dependency_file(NODE_CHEMICAL, NODE_DISEASE, None, True)
self.parse_dependency_file(NODE_CHEMICAL, NODE_GENE, None, True)
self.parse_dependency_file(NODE_GENE, NODE_DISEASE, None, True)
self.parse_dependency_file(NODE_GENE, NODE_GENE, None, True)
logging.info('literature genes:' + str(len(self.literature_genes)))
logging.info('literature diseases:' + str(len(self.literature_diseases)))
logging.info('literature chemicals:' + str(len(self.literature_chemicals)))
db = get_database()
print('Cleaning chemical...')
self.literature_chemicals = set(val for entry in self.literature_chemicals for val in entry.split('|'))
chemical_ids_to_exclude = db.get_data(
'MATCH (n:Chemical) WITH collect(n.eid) AS entity_ids RETURN [entry in $zenodo_ids WHERE NOT split(entry, ":")[1] IN entity_ids] AS exclude',
{'zenodo_ids': list(self.literature_chemicals)})['exclude'].tolist()[0]
print('Cleaning disease...')
disease_ids_to_exclude = db.get_data(
'MATCH (n:Disease) WITH collect(n.eid) AS entity_ids RETURN [entry in $zenodo_ids WHERE NOT split(entry, ":")[1] IN entity_ids] AS exclude',
{'zenodo_ids': list(self.literature_diseases)})['exclude'].tolist()[0]
print('Cleaning gene...')
gene_ids_to_exclude = db.get_data(
'MATCH (n:Gene:db_NCBI) WITH collect(n.eid) AS entity_ids RETURN [entry in $zenodo_ids WHERE NOT entry IN entity_ids] AS exclude',
{'zenodo_ids': list(self.literature_genes)})['exclude'].tolist()[0]
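# The three queries above collect the eids of known Chemical/Disease/Gene nodes in the
# database and return the Zenodo identifiers that are NOT among them (matching on the part
# after ':' for MESH/CHEBI-prefixed ids, and on the full id for NCBI genes); these ids are
# subtracted from the literature sets below before the association files are cleaned.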
# print('Cleaning chemical...')
# with open(os.path.join(self.parsed_dir, self.file_prefix + 'chemical.tsv'), 'w') as f:
# f.writelines([s + '\n' for s in list(self.literature_chemicals - set(chemical_ids_to_exclude))])
# print('Cleaning disease...')
# with open(os.path.join(self.parsed_dir, self.file_prefix + 'disease.tsv'), 'w') as f:
# f.writelines([s + '\n' for s in list(self.literature_diseases - set(disease_ids_to_exclude))])
# print('Cleaning gene...')
# with open(os.path.join(self.parsed_dir, self.file_prefix + 'gene.tsv'), 'w') as f:
# f.writelines([s + '\n' for s in list(self.literature_genes - set(gene_ids_to_exclude))])
cleaned_chemical_ids = list(self.literature_chemicals - set(chemical_ids_to_exclude))
cleaned_disease_ids = list(self.literature_diseases - set(disease_ids_to_exclude))
cleaned_gene_ids = list(self.literature_genes - set(gene_ids_to_exclude))
self.clean_dependency_files(NODE_CHEMICAL, NODE_DISEASE, cleaned_chemical_ids, cleaned_disease_ids)
self.clean_dependency_files(NODE_CHEMICAL, NODE_GENE, cleaned_chemical_ids, cleaned_gene_ids)
self.clean_dependency_files(NODE_GENE, NODE_DISEASE, cleaned_gene_ids, cleaned_disease_ids)
self.clean_dependency_files(NODE_GENE, NODE_GENE, cleaned_gene_ids, cleaned_gene_ids)
def clean_dependency_files(self, entry1_type, entry2_type, entry1_ids, entry2_ids):
input_file = f'{entry1_type}2{entry2_type}_assoc_theme.tsv'
file_path = os.path.join(self.parsed_dir, input_file)
converters = None
if entry1_type == NODE_GENE or entry2_type == NODE_GENE:
f = lambda x: str(x)
converters = {'entry1_id': f, 'entry2_id': f}
df = pd.read_csv(file_path, header=0, sep='\t', converters=converters, names=dependency_headers)
df.drop_duplicates(inplace=True)
df.set_index('entry1_id', inplace=True)
df = df[df.index.isin(entry1_ids)]
df.reset_index(inplace=True)
df.set_index('entry2_id', inplace=True)
df = df[df.index.isin(entry2_ids)]
df.reset_index(inplace=True)
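# Note: the set_index/isin/reset_index round-trips above are an index-based way of filtering;
# a plain boolean mask such as
# df[df['entry1_id'].isin(entry1_ids) & df['entry2_id'].isin(entry2_ids)]
# should select the same rows (sketch for illustration only, behaviour unchanged).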
df.to_csv(file_path, index=False, sep='\t', chunksize=50000)
def parse_path2theme_file(self, entry1_type, entry2_type):
file = self.get_path2theme_datafile_name(entry1_type, entry2_type)
df = pd.read_csv(file, sep='\t', index_col='path')
|
pandas.read_csv
|
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import check_empty_nwp
from Fuzzy_clustering.version2.dataset_manager.common_utils import rescale_mean
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_2d_dense
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
class DatasetCreatorDense:
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1,
test=False, dates=None):
self.projects = projects
self.is_for_test = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
self.compress = self.nwp_resolution == 0.05
self.n_jobs = njobs
self.variables = data_variables
self.logger = create_logger(logger_name=__name__, abs_path=self.path_nwp,
logger_path=f'log_{self.projects_group}.log', write_type='a')
if self.data is not None:
self.dates = self.check_dates()
elif dates is not None:
self.dates = dates
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates are checked. Number of time samples is %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if not var in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
r2d = 45.0 / np.arctan(1.0)
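# r2d = 45/arctan(1) = 180/pi, i.e. the radians-to-degrees factor; arctan2(U, V)*r2d lies in
# (-180, 180], and the +180 offset below shifts the wind direction into the 0-360 degree range.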
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def stack_by_sample(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, predictions):
timestep = 60
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
preds = predictions[project['_id']]
hor = preds.columns[-1] + timestep
p_dates = [t + pd.DateOffset(minutes=hor)]
preds = preds.loc[t].to_frame().T
dates_pred = [t + pd.DateOffset(minutes=h) for h in preds.columns]
pred = pd.DataFrame(preds.values.ravel(), index=dates_pred, columns=[project['_id']])
data_temp = pd.concat([data[project['_id']].iloc[np.where(data.index < t)].to_frame(), pred])
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
try:
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1))].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2))].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1)), project_id].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2)), project_id].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
|
pandas.concat
|
# -*- coding: utf-8 -*-
# %%
import pandas as pd
import numpy as np
import tkinter as tk
class package:
def __init__(self):
# elements defined
C = 12
H = 1.007825
N = 14.003074
O = 15.994915
P = 30.973763
S = 31.972072
Na = 22.98977
Cl = 34.968853
self.elements = [C,H,N,O,P,S,Na,Cl]
self.elementsymbol = ['C','H','N','O','P','S','Na','Cl']
ionname = ['M','M+H','M+2H','M+H-H2O','M+2H-H2O','M+Na','M+2Na','M+2Na-H','M+NH4',
'M-H','M-2H','M-3H','M-4H','M-5H','M-H-H2O','M-2H-H2O','M-CH3','M+Cl','M+HCOO','M+OAc']
ionfunc = []
ionfunc.append(lambda ms: ms)
ionfunc.append(lambda ms: ms+package().elements[1])
ionfunc.append(lambda ms: (ms+2*package().elements[1])/2)
ionfunc.append(lambda ms: ms-package().elements[1]-package().elements[3])
ionfunc.append(lambda ms: (ms-package().elements[3])/2)
ionfunc.append(lambda ms: ms+package().elements[6])
ionfunc.append(lambda ms: (ms+2*package().elements[6])/2)
ionfunc.append(lambda ms: ms-package().elements[1]+2*package().elements[6])
ionfunc.append(lambda ms: ms+4*package().elements[1]+package().elements[2])
ionfunc.append(lambda ms: ms-package().elements[1])
ionfunc.append(lambda ms: (ms-2*package().elements[1])/2)
ionfunc.append(lambda ms: (ms-3*package().elements[1])/3)
ionfunc.append(lambda ms: (ms-4*package().elements[1])/4)
ionfunc.append(lambda ms: (ms-5*package().elements[1])/5)
ionfunc.append(lambda ms: ms-3*package().elements[1]-package().elements[3])
ionfunc.append(lambda ms: (ms-4*package().elements[1]-package().elements[3])/2)
ionfunc.append(lambda ms: ms-package().elements[0]-3*package().elements[1])
ionfunc.append(lambda ms: ms+package().elements[7])
ionfunc.append(lambda ms: ms+package().elements[0]+package().elements[1]+2*package().elements[3])
ionfunc.append(lambda ms: ms+2*package().elements[0]+3*package().elements[1]+2*package().elements[3])
self.ion = {}
for i,j in enumerate(ionname):
self.ion[j] = ionfunc[i]
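# Illustrative example of the adduct table: package().ion['M+H'](500.0) returns
# 500.0 + 1.007825 = 501.007825 (neutral mass plus one hydrogen-atom mass), while
# package().ion['M+2H'](500.0) returns (500.0 + 2*1.007825)/2 for the doubly charged ion.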
# %% [markdown]
# Package for Sphingolipids
# %%
class package_sl(package):
def __init__(self):
# base structure defined
self.base = {'Cer': np.array([0,3,1,0]+[0]*(len(package().elements)-4)),
'Sphingosine': np.array([0,3,1,0]+[0]*(len(package().elements)-4)),
'Sphinganine': np.array([0,3,1,0]+[0]*(len(package().elements)-4))}
# headgroups defined
headgroup = ['Pi','Choline','Ethanolamine','Inositol','Glc','Gal','GalNAc','NeuAc','Fuc','NeuGc']
formula = []
formula.append(np.array([0,3,0,4,1]+[0]*(len(package().elements)-5)))
formula.append(np.array([5,13,1,1]+[0]*(len(package().elements)-4)))
formula.append(np.array([2,7,1,1]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([8,15,1,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([11,19,1,9]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,5]+[0]*(len(package().elements)-4)))
formula.append(np.array([11,19,1,10]+[0]*(len(package().elements)-4)))
self.components = self.base.copy()
for i,j in enumerate(headgroup):
self.components[j] = formula[i]
# sn type defined
sntype = ['none','d','t']
snformula = []
snformula.append(lambda carbon,db: np.array([carbon,2*carbon-2*db,0,2]+[0]*(len(package().elements)-4)))
snformula.append(lambda carbon,db: np.array([carbon,2*carbon+2-2*db,0,3]+[0]*(len(package().elements)-4)))
snformula.append(lambda carbon,db: np.array([carbon,2*carbon+2-2*db,0,4]+[0]*(len(package().elements)-4)))
self.sn = {}
for i,j in enumerate(sntype):
self.sn[j] = snformula[i]
# extended structure
nana = ['M','D','T','Q','P']
iso = ['1a','1b','1c']
namedf = pd.DataFrame({'0-series': ['LacCer'],'a-series': ['GM3'],'b-series': ['GD3'],'c-series': ['GT3']})
namedf = namedf.append(pd.Series(['G'+'A'+'2' for name in namedf.iloc[0,0:1]]+['G'+i+'2' for i in nana[0:3]],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series(['G'+'A'+'1' for name in namedf.iloc[0,0:1]]+['G'+i+j for i,j in zip(nana[0:3],iso)],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series(['G'+'M'+'1b' for name in namedf.iloc[0,0:1]]+['G'+i+j for i,j in zip(nana[1:4],iso)],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series(['G'+'D'+'1c' for name in namedf.iloc[0,0:1]]+['G'+i+j for i,j in zip(nana[2:],iso)],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series(['G'+'D'+'1α' for name in namedf.iloc[0,0:1]]+[i+'α' for i in namedf.iloc[4,1:]],index = namedf.columns), ignore_index=True)
sequencedf = pd.DataFrame({'0-series': ['Gal-Glc-Cer'],'a-series': ['(NeuAc)-Gal-Glc-Cer'],'b-series': ['(NeuAc-NeuAc)-Gal-Glc-Cer'],'c-series': ['(NeuAc-NeuAc-NeuAc)-Gal-Glc-Cer']})
sequencedf = sequencedf.append(pd.Series(['GalNAc-'+formula for formula in sequencedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['Gal-'+formula for formula in sequencedf.iloc[1,:]],index = namedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['NeuAc-'+formula for formula in sequencedf.iloc[2,:]],index = namedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['NeuAc-'+formula for formula in sequencedf.iloc[3,:]],index = namedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['NeuAc-Gal-(NeuAc)-GalNAc-'+formula for formula in sequencedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
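# Compatibility note: DataFrame.append, used throughout this block, was deprecated in
# pandas 1.4 and removed in 2.0. On newer pandas a roughly equivalent sketch would be
# namedf = pd.concat([namedf, new_row.to_frame().T], ignore_index=True), where new_row is
# the pd.Series being appended (illustrative only, not part of the original code).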
self.base = {'Cer': 'Cer','Sphingosine': 'Sphingosine','Sphinganine': 'Sphinganine','Sphingosine-1-Phosphate': 'Pi-Sphingosine','Sphinganine-1-Phosphate': 'Pi-Sphinganine',
'CerP': 'Pi-Cer','SM': 'Choline-Pi-Cer','CerPEtn': 'Ethanolamine-Pi-Cer','CerPIns': 'Inositol-Pi-Cer',
'LysoSM(dH)': 'Choline-Pi-Sphinganine','LysoSM': 'Choline-Pi-Sphingosine',
'GlcCer': 'Glc-Cer','GalCer': 'Gal-Cer'}
for i in namedf:
for j,k in enumerate(namedf[i]):
self.base[k] = sequencedf[i][j]
def basesn(self,base,typ):
typ = base[typ].split('-')[-1]
if 'Cer' == base[typ]:
return [['d','t'],list(range(18,23)),':',[0,1],'/',['none','h'],list(range(12,33)),':',[0,1]]
elif 'Sphingosine' == base[typ]:
return [['d','t'],list(range(18,23)),':','1']
elif 'Sphinganine' == base[typ]:
return [['d','t'],list(range(18,23)),':','0']
else:
return 0
def iterate(self,base,typ,start,end):
typ = base[typ].split('-')[-1]
start = pd.Series(start)
end = pd.Series(end)
start = start.replace('none','')
end = end.replace('none','')
if 'Cer' == base[typ]:
return ['{}{}:{}/{}{}:{}'.format(i,j,k,l,m,n) for i in [start[0]] for k in range(int(start[2]),int(end[2])+1) for j in range(int(start[1]),int(end[1])+1) for n in range(int(start[5]),int(end[5])+1) for l in [start[3]] for m in range(int(start[4]),int(end[4])+1)]
elif 'Sphingosine' == base[typ]:
return ['{}{}:1'.format(i,j) for i in [start[0]] for j in range(int(start[1]),int(end[1])+1)]
elif 'Sphinganine' == base[typ]:
return ['{}{}:0'.format(i,j) for i in [start[0]] for j in range(int(start[1]),int(end[1])+1)]
else:
return 0
# %% [markdown]
# Package for Glycerophospholipids
# %%
class package_gpl(package):
def __init__(self):
# base structure defined
self.base = {'PA': np.array([3,9,0,6,1]+[0]*(len(package().elements)-5)),
'LysoPA': np.array([3,9,0,6,1]+[0]*(len(package().elements)-5))}
# headgroups defined
headgroup = ['Pi','Choline','Ethanolamine','Inositol','Glycerol']
formula = []
formula.append(np.array([0,3,0,4,1]+[0]*(len(package().elements)-5)))
formula.append(np.array([5,13,1,1]+[0]*(len(package().elements)-4)))
formula.append(np.array([2,7,1,1]+[0]*(len(package().elements)-4)))
formula.append(np.array([6,12,0,6]+[0]*(len(package().elements)-4)))
formula.append(np.array([3,8,0,3]+[0]*(len(package().elements)-4)))
self.components = self.base.copy()
for i,j in enumerate(headgroup):
self.components[j] = formula[i]
# sn type defined
sntype = ['none','O','P']
snformula = []
snformula.append(lambda carbon,db: np.array([carbon,2*carbon-2*db,0,2]+[0]*(len(package().elements)-4)))
snformula.append(lambda carbon,db: np.array([carbon,2*carbon+2-2*db,0,1]+[0]*(len(package().elements)-4)))
snformula.append(lambda carbon,db: np.array([carbon,2*carbon-2*db,0,1]+[0]*(len(package().elements)-4)))
self.sn = {}
for i,j in enumerate(sntype):
self.sn[j] = snformula[i]
# extended structure(extended structure can be defined by library.baseext())
namedf = pd.DataFrame({'a': ['PA'],'b': ['LysoPA']})
namedf = namedf.append(pd.Series([name[0:-1]+'C' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series([name[0:-1]+'E' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series([name[0:-1]+'G' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series([name[0:-1]+'GP' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series([name[0:-1]+'I' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series([name[0:-1]+'IP' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series([name[0:-1]+'IP2' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
namedf = namedf.append(pd.Series([name[0:-1]+'IP3' for name in namedf.iloc[0,:]],index = namedf.columns), ignore_index=True)
sequencedf = pd.DataFrame({'a': ['PA'],'b': ['LysoPA']})
sequencedf = sequencedf.append(pd.Series(['Choline-'+name for name in sequencedf.iloc[0,:]],index = sequencedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['Ethanolamine-'+name for name in sequencedf.iloc[0,:]],index = sequencedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['Glycerol-'+name for name in sequencedf.iloc[0,:]],index = sequencedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['Pi-'+name for name in sequencedf.iloc[3,:]],index = sequencedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['Inositol-'+name for name in sequencedf.iloc[0,:]],index = sequencedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['Pi-'+name for name in sequencedf.iloc[5,:]],index = sequencedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['Pi-'+name for name in sequencedf.iloc[6,:]],index = sequencedf.columns), ignore_index=True)
sequencedf = sequencedf.append(pd.Series(['Pi-'+name for name in sequencedf.iloc[7,:]],index = sequencedf.columns), ignore_index=True)
self.base = {'PA': 'PA','LysoPA': 'LysoPA'}
for i in namedf:
for j,k in enumerate(namedf[i]):
self.base[k] = sequencedf[i][j]
def basesn(self,base,typ):
typ = base[typ].split('-')[-1]
if 'PA' == base[typ]:
return [['none','O','P'],list(range(2,27)),':',[0,1,2,3,4,5,6],'/',['none','O','P'],list(range(2,27)),':',[0,1,2,3,4,5,6]]
elif 'LysoPA' == base[typ]:
return [['none','O','P'],list(range(2,27)),':',[0,1,2,3,4,5,6]]
else:
return 0
def iterate(self,base,typ,start,end):
typ = base[typ].split('-')[-1]
start = pd.Series(start)
end = pd.Series(end)
start = start.replace('none','')
end = end.replace('none','')
if 'PA' == base[typ]:
return ['{}{}:{}/{}{}:{}'.format(i,j,k,l,m,n) for i in [start[0]] for j in range(int(start[1]),int(end[1])+1) for k in range(int(start[2]),int(end[2])+1) for l in [start[3]] for m in range(int(start[4]),int(end[4])+1) for n in range(int(start[5]),int(end[5])+1)]
elif 'LysoPA' == base[typ]:
return ['{}{}:{}'.format(i,j,k) for i in [start[0]] for j in range(int(start[1]),int(end[1])+1) for k in range(int(start[2]),int(end[2])+1)]
else:
return 0
# %% [markdown]
# library class
# %%
class library(package):
def __init__(self,pack):
self.elements = package().elements
self.elementsymbol = package().elementsymbol
self.ion = package().ion
self.components = {}
self.base = {}
self.sn = {}
self.basesnorg = []
self.iterateorg = []
for i,j in enumerate(pack):
self.components = {**self.components,**j().components}
self.base = {**self.base,**j().base}
self.sn = {**self.sn,**j().sn}
self.basesnorg.append(j().basesn)
self.iterateorg.append(j().iterate)
def basesn(self,typ):
base = self.base
for i in range(len(self.basesnorg)):
if not self.basesnorg[i](base,typ) == 0:
return self.basesnorg[i](base,typ)
def iterate(self,typ,start,end):
base = self.base
for i in range(len(self.iterateorg)):
if not self.iterateorg[i](base,typ,start,end) == 0:
return self.iterateorg[i](base,typ,start,end)
def newhgdef(self,newheadgroup,newformula):
self.components[newheadgroup] = newformula
def baseext(self,name,sequence):
self.base[name] = sequence
def mscomp(self,name):
components = name.split('-')
base = components[-1].split('(')[0]
sn = components[-1].split('(')[1].split(')')[0].split('/')
hg = '('+name.replace(base,'')+self.base[base]+')'
hgcode = []
s = 0
hg = hg.split('-')
hg.reverse()
for i,j in enumerate(hg):
if ')' in j:
s += 1
hgcode.append(s)
if '(' in j:
s+= -1
hg[i] = j.replace('(','').replace(')','')
code = []
for i,j in enumerate(hgcode):
if i == 0:
code.append([0])
elif hgcode[i-1] == j:
new = code[i-1].copy()
last = new[-1]+1
new.pop()
new.append(last)
code.append(new)
elif hgcode[i-1] < j:
new = code[i-1].copy()
new.append(0)
code.append(new)
elif hgcode[i-1] > j:
pre = max([k for k in range(i) if hgcode[k] == j])
new = code[pre].copy()
last = new[-1]+1
new.pop()
new.append(last)
code.append(new)
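# The loop above appears to turn the bracket-nesting depths in hgcode into hierarchical
# position codes (lists of indices): staying at the same depth increments the last index,
# going one level deeper appends a 0, and returning to a shallower depth continues from the
# most recent code seen at that depth. The DataFrame built below pairs each headgroup with its code.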
comp = pd.DataFrame({'headgroups': hg,'position': code})
return comp
def msformula(self,name,mode):
components = name.split('-')
base = components[-1].split('(')[0]
sn = components[-1].split('(')[1].split(')')[0].split('/')
headgroups = components[0:-1]
for hg in headgroups:
if '(' in hg:
if ')' not in hg.split('(')[1]:
headgroups[headgroups.index(hg)] = hg.split('(')[1]
elif ')' in hg.split('(')[1]:
headgroups[headgroups.index(hg)] = hg.split('(')[1].split(')')[0]
elif ')' in hg:
headgroups[headgroups.index(hg)] = hg.split(')')[0]
ms = np.array([0,2,0,1]+[0]*(len(self.elements)-4))
H2O = np.array([0,2,0,1]+[0]*(len(self.elements)-4))
for hg in headgroups:
ms += self.components[hg]
ms += -H2O
components = self.base[base].split('-')
for c in components:
if '(' in c:
if ')' not in c.split('(')[1]:
components[components.index(c)] = c.split('(')[1]
elif ')' in c.split('(')[1]:
components[components.index(c)] = c.split('(')[1].split(')')[0]
elif ')' in c:
components[components.index(c)] = c.split(')')[0]
for c in components:
ms += self.components[c]
ms += -H2O
for sni in sn:
if 'd' in sni:
carbon = int(sni.split('d')[1].split(':')[0])
db = int(sni.split('d')[1].split(':')[1])
ms += self.sn['d'](carbon,db)
elif 't' in sni:
carbon = int(sni.split('t')[1].split(':')[0])
db = int(sni.split('t')[1].split(':')[1])
ms += self.sn['t'](carbon,db)
elif 'O' in sni:
carbon = int(sni.split('O')[1].split(':')[0])
db = int(sni.split('O')[1].split(':')[1])
ms += self.sn['O'](carbon,db)
elif 'P' in sni:
carbon = int(sni.split('P')[1].split(':')[0])
db = int(sni.split('P')[1].split(':')[1])
ms += self.sn['P'](carbon,db)
else:
carbon = int(sni.split(':')[0])
db = int(sni.split(':')[1])
ms += self.sn['none'](carbon,db)
ms += -H2O
if mode == 'raw':
return ms
elif mode == 'molecule':
formulalist = [i+'{}'.format(j) for i,j in zip(self.elementsymbol[0:len(ms)],ms) if j > 0]
formula = ''
for f in formulalist:
formula += f
return formula
def mscalculator(self,name,ion):
ms = (self.msformula(name,mode='raw')*self.elements[0:len(self.msformula(name,mode='raw'))]).cumsum()[-1]
return self.ion[ion](ms)
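# The elementwise product of the element-count vector and the atomic masses, summed via
# cumsum()[-1], is the monoisotopic mass (equivalent to np.dot(counts, masses)); the chosen
# ion lambda then applies the adduct shift, e.g. self.ion['M-H'](mass) subtracts one
# hydrogen-atom mass.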
def export(self):
expwind = tk.Tk()
expwind.title('Export settings')
expwind.geometry('700x300')
var_base = tk.StringVar()
initialbase = list(self.base.keys())
title = tk.Label(expwind,text = 'Select base')
title.config(font=("Times New Roman", 20))
var_base.set(initialbase)
listbox1 = tk.Listbox(expwind,listvariable = var_base,selectmode = 'extended')
listbox1.config(font=("Times New Roman", 12))
var_add = tk.StringVar()
subtitle = tk.Label(expwind,text = 'others')
subtitle.config(font=("Times New Roman", 15))
other = tk.Entry(expwind,textvariable = var_add)
other.config(font=("Times New Roman", 12))
def base_selection():
global base_input
base_input = [listbox1.get(i) for i in listbox1.curselection()]
title.destroy()
listbox1.destroy()
button1.destroy()
subtitle.destroy()
other.destroy()
addbutton.destroy()
global sn_input
sn_input = []
def snloop(i,skip,add,apply):
if skip == True:
i += 1
else:
global menu_st,menu_end,var_st,var_end
menu_st = []
menu_end = []
var_st = []
var_end = []
title = tk.Label(expwind,text = base_input[i])
title.config(font=("Times New Roman", 20))
title.grid(row = 0,column = 0,padx=20)
labelstart = tk.Label(expwind,text = 'start')
labelstart.config(font=("Times New Roman", 15))
labelend = tk.Label(expwind,text = 'end')
labelend.config(font=("Times New Roman", 15))
labelstart.grid(row = 1,column = 0,padx=20)
label = []
for n,sntype in enumerate(self.basesn(base_input[i])):
if type(sntype) == str:
label.append(tk.Label(expwind,text = sntype))
label[-1].config(font=("Times New Roman", 12))
label[-1].grid(row = 1,column = n+1)
else:
var_st.append(tk.StringVar())
menu_st.append(tk.OptionMenu(expwind,var_st[-1],*sntype))
menu_st[-1].config(font=("Times New Roman", 12))
menu_st[-1].grid(row = 1,column = n+1)
labelend.grid(row = 2,column = 0,padx=20)
for n,sntype in enumerate(self.basesn(base_input[i])):
if type(sntype) == str:
label.append(tk.Label(expwind,text = sntype))
label[-1].config(font=("Times New Roman", 12))
label[-1].grid(row = 2,column = n+1)
elif type(sntype[0]) == str:
label.append(tk.Label(expwind,text = ''))
label[-1].config(font=("Times New Roman", 12))
label[-1].grid(row = 2,column = n+1)
var_end.append(tk.StringVar())
var_end[-1].set(var_st[n].get())
menu_end.append(tk.OptionMenu(expwind,var_end[-1],*sntype))
else:
var_end.append(tk.StringVar())
menu_end.append(tk.OptionMenu(expwind,var_end[-1],*sntype))
menu_end[-1].config(font=("Times New Roman", 12))
menu_end[-1].grid(row = 2,column = n+1)
i += 1
def sn_selection():
st = []
end = []
for n in range(len(menu_st)):
st.append(var_st[n].get())
end.append(var_end[n].get())
menu_st[n].destroy()
menu_end[n].destroy()
for n in label:
n.destroy()
title.destroy()
labelstart.destroy()
labelend.destroy()
button2.destroy()
button3.destroy()
button4.destroy()
if add == True:
sn_input[-1] = sn_input[-1]+self.iterate(base_input[i-1],st,end)
else:
sn_input.append(self.iterate(base_input[i-1],st,end))
if i < len(base_input):
snloop(i,skip = False,add = False,apply = False)
else:
cancel.destroy()
ion_selection()
def apply_all():
st = []
end = []
for n in range(len(menu_st)):
st.append(var_st[n].get())
end.append(var_end[n].get())
menu_st[n].destroy()
menu_end[n].destroy()
for n in label:
n.destroy()
title.destroy()
labelstart.destroy()
labelend.destroy()
if apply == False:
button2.destroy()
button3.destroy()
button4.destroy()
if add == True:
sn_input[-1] = sn_input[-1]+self.iterate(base_input[i-1],st,end)
else:
sn_input.append(self.iterate(base_input[i-1],st,end))
if i < len(base_input):
if self.basesn(base_input[i]) in [self.basesn(base_input[p]) for p in range(i)]:
snloop(i,skip = True,add = False,apply = True)
else:
snloop(i,skip = False,add = False,apply = True)
else:
ion_selection()
def add_other():
st = []
end = []
for n in range(len(menu_st)):
st.append(var_st[n].get())
end.append(var_end[n].get())
menu_st[n].destroy()
menu_end[n].destroy()
for n in label:
n.destroy()
title.destroy()
labelstart.destroy()
labelend.destroy()
if apply == False:
button2.destroy()
button3.destroy()
button4.destroy()
if add == True:
sn_input[-1] = sn_input[-1]+self.iterate(base_input[i-1],st,end)
else:
sn_input.append(self.iterate(base_input[i-1],st,end))
if apply == True:
snloop(i-1,skip = False,add = True,apply = True)
else:
snloop(i-1,skip = False,add = True,apply = False)
if skip == False:
if apply == False:
button2 = tk.Button(expwind,text = 'confirm',command = sn_selection)
button2.config(font=("Times New Roman", 12))
button2.grid(row = 3,column = 0)
button3 = tk.Button(expwind,text = 'apply to others',command = apply_all)
button3.config(font=("Times New Roman", 12))
button3.grid(row = 3,column = 1)
button4 = tk.Button(expwind,text = 'add',command = add_other)
button4.config(font=("Times New Roman", 12))
button4.grid(row = 3,column = 2)
expwind.mainloop()
else:
index = [self.basesn(base_input[p]) for p in range(i-1)].index(self.basesn(base_input[i-1]))
sn_input.append(sn_input[index])
if i < len(base_input):
if self.basesn(base_input[i]) in [self.basesn(base_input[p]) for p in range(i)]:
snloop(i,skip = True,add = False,apply = False)
else:
snloop(i,skip = False,add = False,apply = False)
else:
cancel.destroy()
ion_selection()
snloop(0,skip = False,add = False,apply = False)
def add_base():
initialbase.append(other.get())
var_base.set(initialbase)
other.delete(first = 0,last = 100)
def ion_selection():
title1 = tk.Label(expwind,text = 'Select ions')
title1.config(font=("Times New Roman", 20))
var_ion = tk.StringVar()
var_ion.set(list(package().ion.keys()))
listbox2 = tk.Listbox(expwind,listvariable = var_ion,selectmode = 'extended')
listbox2.config(font=("Times New Roman", 12))
title1.grid(row = 0,column = 0,padx=100)
listbox2.grid(row = 1,column = 0,padx=100)
def filename_type():
global ion_input
ion_input = [listbox2.get(i) for i in listbox2.curselection()]
title1.destroy()
listbox2.destroy()
button5.destroy()
title2 = tk.Label(expwind,text = 'Type filename (with .xlsx)')
title2.config(font=("Times New Roman", 20))
var_file = tk.StringVar(value = 'library.xlsx')
file = tk.Entry(expwind,textvariable = var_file)
file.config(font=("Times New Roman", 12))
title2.grid(row = 0,column = 0,padx=100)
file.grid(row = 1,column = 0,padx=100)
def export():
global filename
filename = var_file.get()
self.toexcel()
expwind.destroy()
root()
button6 = tk.Button(expwind,text = 'export',command = export)
button6.config(font=("Times New Roman", 12))
button6.grid(row = 2,column = 0,padx=100)
expwind.mainloop()
button5 = tk.Button(expwind,text = 'confirm',command = filename_type)
button5.config(font=("Times New Roman", 12))
button5.grid(row = 2,column = 0,padx=100)
cancel = tk.Button(expwind,text = 'cancel',command = cancelrun)
cancel.config(font=("Times New Roman", 12))
cancel.grid(row = 2,column = 1,padx=5)
expwind.mainloop()
def cancelrun():
expwind.destroy()
root()
button1 = tk.Button(expwind,text = 'confirm',command = base_selection)
button1.config(font=("Times New Roman", 12))
addbutton = tk.Button(expwind,text = 'add',command = add_base)
addbutton.config(font=("Times New Roman", 12))
title.grid(row = 0,column = 0,padx=20)
listbox1.grid(row = 1,column = 0,rowspan = 9,padx=20)
button1.grid(row = 10,column = 0,padx=20)
subtitle.grid(row = 0,column = 1,padx=20)
other.grid(row = 1,column = 1,padx=20)
addbutton.grid(row = 2,column = 1,padx=20)
cancel = tk.Button(expwind,text = 'cancel',command = cancelrun)
cancel.config(font=("Times New Roman", 12))
cancel.grid(row = 10,column = 20)
expwind.mainloop()
def toexcel(self):
with pd.ExcelWriter(filename) as writer:
self.df = {}
for i,b in enumerate(base_input):
name = [b+'('+j+')' for j in sn_input[i]]
self.df[b] = pd.DataFrame({b: name})
|
pandas.DataFrame
|
from io import StringIO
import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, date_range
import pandas._testing as tm
from pandas.core.computation.check import _NUMEXPR_INSTALLED
PARSERS = "python", "pandas"
ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne)
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != "pandas":
pytest.skip(f"cannot evaluate with parser {repr(parser)}")
class TestCompat:
def setup_method(self, method):
self.df = DataFrame({"A": [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query("A>0")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query("A>0", engine=None)
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine=None)
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query("A>0", engine="python")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="python")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if _NUMEXPR_INSTALLED:
result = df.query("A>0", engine="numexpr")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="numexpr")
tm.assert_series_equal(result, self.expected2, check_names=False)
else:
with pytest.raises(ImportError):
df.query("A>0", engine="numexpr")
with pytest.raises(ImportError):
df.eval("A+1", engine="numexpr")
class TestDataFrameEval:
# smaller hits python, larger hits numexpr
@pytest.mark.parametrize("n", [4, 4000])
@pytest.mark.parametrize(
"op_str,op,rop",
[
("+", "__add__", "__radd__"),
("-", "__sub__", "__rsub__"),
("*", "__mul__", "__rmul__"),
("/", "__truediv__", "__rtruediv__"),
],
)
def test_ops(self, op_str, op, rop, n):
        # test ops and reversed ops in evaluation
# GH7198
df = DataFrame(1, index=range(n), columns=list("abcd"))
df.iloc[0] = 2
m = df.mean()
base = DataFrame( # noqa
np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
)
expected = eval(f"base {op_str} df")
# ops as strings
result = eval(f"m {op_str} df")
tm.assert_frame_equal(result, expected)
# these are commutative
if op in ["+", "*"]:
result = getattr(df, op)(m)
tm.assert_frame_equal(result, expected)
# these are not
elif op in ["-", "/"]:
result = getattr(df, rop)(m)
tm.assert_frame_equal(result, expected)
def test_dataframe_sub_numexpr_path(self):
# GH7192: Note we need a large number of rows to ensure this
# goes through the numexpr path
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = 1 - np.isnan(df.iloc[0:25])
result = (1 - np.isnan(df)).iloc[0:25]
tm.assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]})
msg = "expr must be a string to be evaluated"
with pytest.raises(ValueError, match=msg):
df.query(lambda x: x.B == "b")
with pytest.raises(ValueError, match=msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = pd.DataFrame({"A": [1, 2, 3]})
msg = "expr cannot be an empty string"
with pytest.raises(ValueError, match=msg):
df.query("")
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(np.random.randn(10, 2), columns=list("ab"))
dict1 = {"a": 1}
dict2 = {"b": 2}
assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
class TestDataFrameQueryWithMultiIndex:
def test_query_with_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b], names=["color", "food"])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(
df.index.get_level_values("color").values, index=index, name="color"
)
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# ## LEVEL 1
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == "eggs"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != "eggs"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
|
tm.assert_frame_equal(res2, exp)
|
pandas._testing.assert_frame_equal
|
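# --- Editor's illustration (not part of the record above) ---
# A minimal sketch of what the MultiIndex query tests above assert: DataFrame.query with
# the "pandas" parser should select the same rows as plain boolean indexing on the named
# index level, and pandas._testing.assert_frame_equal verifies the two results match.
# The data here is invented for illustration.
import numpy as np
import pandas as pd
import pandas._testing as tm

index = pd.MultiIndex.from_arrays(
    [["red", "green", "red"], ["eggs", "ham", "ham"]], names=["color", "food"]
)
df = pd.DataFrame(np.arange(6).reshape(3, 2), index=index, columns=["x", "y"])

result = df.query('color == "red"', parser="pandas", engine="python")
expected = df[df.index.get_level_values("color") == "red"]
tm.assert_frame_equal(result, expected)
# --- end illustration ---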
#!python
# coding=utf-8
from copy import copy
from collections import OrderedDict
import numpy as np
import pandas as pd
import netCDF4 as nc4
from pocean.utils import (
create_ncvar_from_series,
dict_update,
generic_masked,
get_default_axes,
get_dtype,
get_mapped_axes_variables,
get_masked_datetime_array,
get_ncdata_from_series,
normalize_countable_array,
)
from pocean.cf import CFDataset
from pocean.cf import cf_safe_name
from pocean import logger as L # noqa
class OrthogonalMultidimensionalTimeseries(CFDataset):
"""
H.2.1. Orthogonal multidimensional array representation of time series
If the time series instances have the same number of elements and the time values are identical
for all instances, you may use the orthogonal multidimensional array representation. This has
either a one-dimensional coordinate variable, time(time), provided the time values are ordered
monotonically, or a one-dimensional auxiliary coordinate variable, time(o), where o is the
element dimension. In the former case, listing the time variable in the coordinates attributes
of the data variables is optional.
"""
@classmethod
def is_mine(cls, dsg):
try:
rvars = dsg.filter_by_attrs(cf_role='timeseries_id')
assert len(rvars) == 1
assert dsg.featureType.lower() == 'timeseries'
assert len(dsg.t_axes()) >= 1
assert len(dsg.x_axes()) >= 1
assert len(dsg.y_axes()) >= 1
# Not a CR
assert not dsg.filter_by_attrs(
sample_dimension=lambda x: x is not None
)
# Not an IR
assert not dsg.filter_by_attrs(
instance_dimension=lambda x: x is not None
)
# OM files will always have a time variable with one dimension.
assert len(dsg.t_axes()[0].dimensions) == 1
# Allow for string variables
rvar = rvars[0]
# 0 = single
# 1 = array of strings/ints/bytes/etc
# 2 = array of character arrays
assert 0 <= len(rvar.dimensions) <= 2
except AssertionError:
return False
return True
@classmethod
def from_dataframe(cls, df, output, **kwargs):
axes = get_default_axes(kwargs.pop('axes', {}))
data_columns = [ d for d in df.columns if d not in axes ]
with OrthogonalMultidimensionalTimeseries(output, 'w') as nc:
station_group = df.groupby(axes.station)
num_stations = len(station_group)
# assume all groups are the same size and have identical times
_, sdf = list(station_group)[0]
t = sdf[axes.t]
# Metadata variables
nc.createVariable('crs', 'i4')
# Create all of the variables
nc.createDimension(axes.t, t.size)
nc.createDimension(axes.station, num_stations)
station = nc.createVariable(axes.station, get_dtype(df.station), (axes.station,))
time = nc.createVariable(axes.t, 'f8', (axes.t,))
latitude = nc.createVariable(axes.y, get_dtype(df[axes.y]), (axes.station,))
longitude = nc.createVariable(axes.x, get_dtype(df[axes.x]), (axes.station,))
z = nc.createVariable(axes.z, get_dtype(df[axes.z]), (axes.station,), fill_value=df[axes.z].dtype.type(cls.default_fill_value))
attributes = dict_update(nc.nc_attributes(axes), kwargs.pop('attributes', {}))
            # tolist() converts to python datetime objects (no timezone) and keeps the NaTs.
g = t.tolist()
            # date2num converts NaTs to np.nan
gg = nc4.date2num(g, units=cls.default_time_unit)
# masked_invalid moves np.nan to a masked value
time[:] = np.ma.masked_invalid(gg)
for i, (uid, sdf) in enumerate(station_group):
station[i] = uid
latitude[i] = sdf[axes.y].iloc[0]
longitude[i] = sdf[axes.x].iloc[0]
# TODO: write a test for a Z with a _FillValue
z[i] = sdf[axes.z].iloc[0]
for c in data_columns:
# Create variable if it doesn't exist
var_name = cf_safe_name(c)
if var_name not in nc.variables:
v = create_ncvar_from_series(
nc,
var_name,
(axes.station, axes.t),
sdf[c],
zlib=True,
complevel=1
)
attributes[var_name] = dict_update(attributes.get(var_name, {}), {
'coordinates' : '{} {} {} {}'.format(
axes.t, axes.z, axes.x, axes.y
)
})
else:
v = nc.variables[var_name]
vvalues = get_ncdata_from_series(sdf[c], v)
try:
v[i, :] = vvalues
except BaseException:
L.debug('{} was not written. Likely a metadata variable'.format(v.name))
# Set global attributes
nc.update_attributes(attributes)
return OrthogonalMultidimensionalTimeseries(output, **kwargs)
def calculated_metadata(self, df=None, geometries=True, clean_cols=True, clean_rows=True, **kwargs):
# axes = get_default_axes(kwargs.pop('axes', {}))
# if df is None:
# df = self.to_dataframe(clean_cols=clean_cols, clean_rows=clean_rows, axes=axes)
raise NotImplementedError
def to_dataframe(self, clean_cols=False, clean_rows=False, **kwargs):
axes = get_default_axes(kwargs.pop('axes', {}))
axv = get_mapped_axes_variables(self, axes)
# T
t = get_masked_datetime_array(axv.t[:], axv.t)
# X
x = generic_masked(axv.x[:].repeat(t.size), attrs=self.vatts(axv.x.name))
# Y
y = generic_masked(axv.y[:].repeat(t.size), attrs=self.vatts(axv.y.name))
# Z
z = generic_masked(axv.z[:].repeat(t.size), attrs=self.vatts(axv.z.name))
svar = axv.station
s = normalize_countable_array(svar)
s = np.repeat(s, t.size)
# now repeat t per station
# figure out if this is a single-station file
# do this by checking the dimensions of the Z var
if axv.z.ndim == 1:
t = np.repeat(t, len(svar))
df_data = OrderedDict([
(axes.t, t),
(axes.x, x),
(axes.y, y),
(axes.z, z),
(axes.station, s),
])
building_index_to_drop = np.ma.zeros(t.size, dtype=bool)
# Axes variables are already processed so skip them
extract_vars = copy(self.variables)
for ncvar in axv._asdict().values():
if ncvar is not None and ncvar.name in extract_vars:
del extract_vars[ncvar.name]
for i, (dnam, dvar) in enumerate(extract_vars.items()):
vdata = generic_masked(dvar[:].flatten().astype(dvar.dtype), attrs=self.vatts(dnam))
# Carry through size 1 variables
if vdata.size == 1:
if vdata[0] is np.ma.masked:
L.warning("Skipping variable {} that is completely masked".format(dnam))
continue
vdata = vdata[0]
else:
if dvar[:].flatten().size != t.size:
L.warning("Variable {} is not the correct size, skipping.".format(dnam))
continue
building_index_to_drop = (building_index_to_drop == True) & (vdata.mask == True) # noqa
df_data[dnam] = vdata
df =
|
pd.DataFrame(df_data)
|
pandas.DataFrame
|
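# --- Editor's illustration (not part of the record above) ---
# A minimal sketch of the final step of to_dataframe() above: collect equal-length
# (possibly masked) column arrays in an ordered mapping and hand them to pd.DataFrame,
# which preserves the insertion order of the columns. The values are invented.
from collections import OrderedDict
import numpy as np
import pandas as pd

t = pd.date_range("2020-01-01", periods=4, freq="H")
df_data = OrderedDict([
    ("t", t),
    ("station", np.repeat("buoy-1", t.size)),
    ("temperature", np.ma.masked_invalid([10.1, np.nan, 10.3, 10.4])),
])
df = pd.DataFrame(df_data)
print(df.dtypes)   # masked values come through as NaN in a float column
# --- end illustration ---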
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
iNaT,
)
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"minute",
"min",
"minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
def test_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.round(freq)
def test_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.min.ceil("s")
expected = Timedelta.min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = Timedelta.max.floor("s")
expected = Timedelta.max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timedelta.min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
Timedelta.max.ceil("s")
@pytest.mark.parametrize("n", range(100))
@pytest.mark.parametrize(
"method", [Timedelta.round, Timedelta.floor, Timedelta.ceil]
)
def test_round_sanity(self, method, n, request):
val = np.random.randint(iNaT + 1, lib.i8max, dtype=np.int64)
td = Timedelta(val)
assert method(td, "ns") == td
res = method(td, "us")
nanos = 1000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "ms")
nanos = 1_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "s")
nanos = 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "min")
nanos = 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "h")
nanos = 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "D")
nanos = 24 * 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
for v in [NaT, None, float("nan"), np.nan]:
assert not (v in td)
td = to_timedelta([NaT])
for v in [NaT, None, float("nan"), np.nan]:
assert v in td
def test_identity(self):
td = Timedelta(10, unit="d")
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
assert Timedelta("10") == np.timedelta64(10, "ns")
assert Timedelta("10ns") == np.timedelta64(10, "ns")
assert Timedelta("100") == np.timedelta64(100, "ns")
assert Timedelta("100ns") == np.timedelta64(100, "ns")
assert Timedelta("1000") == np.timedelta64(1000, "ns")
assert Timedelta("1000ns") == np.timedelta64(1000, "ns")
assert Timedelta("1000NS") == np.timedelta64(1000, "ns")
assert Timedelta("10us") == np.timedelta64(10000, "ns")
assert Timedelta("100us") == np.timedelta64(100000, "ns")
assert Timedelta("1000us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000Us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000uS") == np.timedelta64(1000000, "ns")
assert Timedelta("1ms") == np.timedelta64(1000000, "ns")
assert Timedelta("10ms") == np.timedelta64(10000000, "ns")
assert Timedelta("100ms") == np.timedelta64(100000000, "ns")
assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns")
assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns")
assert Timedelta("1s") == np.timedelta64(1000000000, "ns")
assert Timedelta("10s") == np.timedelta64(10000000000, "ns")
assert Timedelta("100s") == np.timedelta64(100000000000, "ns")
assert Timedelta("1000s") == np.timedelta64(1000000000000, "ns")
assert Timedelta("1d") == conv(np.timedelta64(1, "D"))
assert Timedelta("-1d") == -conv(np.timedelta64(1, "D"))
assert Timedelta("1D") == conv(np.timedelta64(1, "D"))
assert Timedelta("10D") == conv(np.timedelta64(10, "D"))
assert Timedelta("100D") == conv(np.timedelta64(100, "D"))
assert Timedelta("1000D") == conv(np.timedelta64(1000, "D"))
assert Timedelta("10000D") == conv(np.timedelta64(10000, "D"))
# space
assert Timedelta(" 10000D ") == conv(np.timedelta64(10000, "D"))
assert Timedelta(" - 10000D ") == -conv(np.timedelta64(10000, "D"))
# invalid
msg = "invalid unit abbreviation"
with pytest.raises(ValueError, match=msg):
Timedelta("1foo")
msg = "unit abbreviation w/o a number"
with pytest.raises(ValueError, match=msg):
Timedelta("foo")
def test_full_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
d1 = np.timedelta64(1, "D")
assert Timedelta("1days") == conv(d1)
assert Timedelta("1days,") == conv(d1)
assert Timedelta("- 1days,") == -conv(d1)
assert Timedelta("00:00:01") == conv(np.timedelta64(1, "s"))
assert Timedelta("06:00:01") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.0") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.01") == conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
assert Timedelta("- 1days, 00:00:01") == conv(-d1 + np.timedelta64(1, "s"))
assert Timedelta("1days, 06:00:01") == conv(
d1 + np.timedelta64(6 * 3600 + 1, "s")
)
assert Timedelta("1days, 06:00:01.01") == conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
# invalid
msg = "have leftover units"
with pytest.raises(ValueError, match=msg):
Timedelta("- 1days, 00")
def test_pickle(self):
v = Timedelta("1 days 10:11:12.0123456")
v_p = tm.round_trip_pickle(v)
assert v == v_p
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, "D")
td = timedelta(days=1)
assert hash(v) == hash(td)
d = {td: 2}
assert d[v] == 2
tds = [Timedelta(seconds=1) + Timedelta(days=n) for n in range(20)]
assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
# python timedeltas drop ns resolution
ns_td = Timedelta(1, "ns")
assert hash(ns_td) != hash(ns_td.to_pytimedelta())
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
assert min_td.value == iNaT + 1
assert max_td.value == lib.i8max
# Beyond lower limit, a NAT before the Overflow
assert (min_td - Timedelta(1, "ns")) is NaT
msg = "int too (large|big) to convert"
with pytest.raises(OverflowError, match=msg):
min_td - Timedelta(2, "ns")
with pytest.raises(OverflowError, match=msg):
max_td + Timedelta(1, "ns")
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, "ns")
assert td is NaT
with pytest.raises(OverflowError, match=msg):
Timedelta(min_td.value - 2, "ns")
with pytest.raises(OverflowError, match=msg):
Timedelta(max_td.value + 1, "ns")
def test_total_seconds_precision(self):
# GH 19458
assert Timedelta("30S").total_seconds() == 30.0
assert
|
Timedelta("0")
|
pandas.Timedelta
|
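# --- Editor's illustration (not part of the record above) ---
# A minimal sketch of the scalar Timedelta behaviours exercised above: string and
# nanosecond construction agree, total_seconds() is exact for whole seconds, and
# round()/floor() snap to a fixed frequency. Values are chosen for illustration.
from pandas import Timedelta

td = Timedelta("1 days 02:34:56.789123456")
assert td == Timedelta(td.value, unit="ns")          # round-trips through nanoseconds
assert Timedelta("30S").total_seconds() == 30.0
assert td.round("s") == Timedelta("1 days 02:34:57")
assert td.floor("min") == Timedelta("1 days 02:34:00")
assert abs(Timedelta("-10d")) == Timedelta(10, unit="d")
# --- end illustration ---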
from itertools import groupby, zip_longest
from fractions import Fraction
from random import sample
import json
import pandas as pd
import numpy as np
import music21 as m21
from music21.meter import TimeSignatureException
m21.humdrum.spineParser.flavors['JRP'] = True
from collections import defaultdict
#song has no meter
class UnknownPGramType(Exception):
def __init__(self, arg):
self.arg = arg
def __str__(self):
return f"Unknown pgram type: {self.arg}."
#compute features:
def compute_completesmeasure_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
def compute_completesmeasure_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
#extract IOI in units of beat
#IOI_beatfraction[i] is the IOI from the start of the ith note to the start of the (i+1)th note
#for the last note, its beatfraction is taken instead
#Also to be interpreted as the duration of the note plus the duration of any following rests (except rests at the end of the melody)
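#e.g. (hypothetical illustration): beatinsong ['0', '1/2', '1', '5/2'] with a final beatfraction of '1/2'
#yields IOI_beatfraction ['1/2', '1/2', '3/2', '1/2']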
#
#extract beats per measure
def extractFeatures(seq_iter, vocalfeatures=True):
count = 0
for seq in seq_iter:
count += 1
if count % 100 == 0:
print(count, end=' ')
pairs = zip(seq['features']['beatinsong'],seq['features']['beatinsong'][1:]) #this possibly includes rests
IOI_beatfraction = [Fraction(o[1])-Fraction(o[0]) for o in pairs]
IOI_beatfraction = [str(bf) for bf in IOI_beatfraction] + [seq['features']['beatfraction'][-1]]
seq['features']['IOI_beatfraction'] = IOI_beatfraction
beatspermeasure = [m21.meter.TimeSignature(ts).beatCount for ts in seq['features']['timesignature']]
seq['features']['beatspermeasure'] = beatspermeasure
phrasepos = seq['features']['phrasepos']
phrasestart_ix=[0]*len(phrasepos)
for ix in range(1,len(phrasestart_ix)):
if phrasepos[ix] < phrasepos[ix-1]:
phrasestart_ix[ix] = ix
else:
phrasestart_ix[ix] = phrasestart_ix[ix-1]
seq['features']['phrasestart_ix'] = phrasestart_ix
endOfPhrase = [x[1]<x[0] for x in zip(phrasepos, phrasepos[1:])] + [True]
seq['features']['endOfPhrase'] = endOfPhrase
cm_p = [compute_completesmeasure_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cb_p = [compute_completesbeat_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cm_s = [compute_completesmeasure_song(seq, ix) for ix in range(len(phrasepos))]
cb_s = [compute_completesbeat_song(seq, ix) for ix in range(len(phrasepos))]
seq['features']['completesmeasure_phrase'] = cm_p
seq['features']['completesbeat_phrase'] = cb_p
seq['features']['completesmeasure_song'] = cm_s
seq['features']['completesbeat_song'] = cb_s
if vocalfeatures:
#move lyric features to end of melisma:
#rhymes, rhymescontentwords, wordstress, noncontentword, wordend
#and compute rhyme_noteoffset and rhyme_beatoffset
if 'melismastate' in seq['features'].keys(): #vocal?
lyrics = seq['features']['lyrics']
phoneme = seq['features']['phoneme']
melismastate = seq['features']['melismastate']
rhymes = seq['features']['rhymes']
rhymescontentwords = seq['features']['rhymescontentwords']
wordend = seq['features']['wordend']
noncontentword = seq['features']['noncontentword']
wordstress = seq['features']['wordstress']
rhymes_endmelisma, rhymescontentwords_endmelisma = [], []
wordend_endmelisma, noncontentword_endmelisma, wordstress_endmelisma = [], [], []
lyrics_endmelisma, phoneme_endmelisma = [], []
from_ix = 0
inmelisma = False
for ix in range(len(phrasepos)):
if melismastate[ix] == 'start':
from_ix = ix
inmelisma = True
if melismastate[ix] == 'end':
if not inmelisma:
from_ix = ix
inmelisma = False
rhymes_endmelisma.append(rhymes[from_ix])
rhymescontentwords_endmelisma.append(rhymescontentwords[from_ix])
wordend_endmelisma.append(wordend[from_ix])
noncontentword_endmelisma.append(noncontentword[from_ix])
wordstress_endmelisma.append(wordstress[from_ix])
lyrics_endmelisma.append(lyrics[from_ix])
phoneme_endmelisma.append(phoneme[from_ix])
else:
rhymes_endmelisma.append(False)
rhymescontentwords_endmelisma.append(False)
wordend_endmelisma.append(False)
noncontentword_endmelisma.append(False)
wordstress_endmelisma.append(False)
lyrics_endmelisma.append(None)
phoneme_endmelisma.append(None)
seq['features']['rhymes_endmelisma'] = rhymes_endmelisma
seq['features']['rhymescontentwords_endmelisma'] = rhymescontentwords_endmelisma
seq['features']['wordend_endmelisma'] = wordend_endmelisma
seq['features']['noncontentword_endmelisma'] = noncontentword_endmelisma
seq['features']['wordstress_endmelisma'] = wordstress_endmelisma
seq['features']['lyrics_endmelisma'] = lyrics_endmelisma
seq['features']['phoneme_endmelisma'] = phoneme_endmelisma
#compute rhyme_noteoffset and rhyme_beatoffset
rhyme_noteoffset = [0]
rhyme_beatoffset = [0.0]
previous = 0
previousbeat = float(Fraction(seq['features']['beatinsong'][0]))
for ix in range(1,len(rhymescontentwords_endmelisma)):
if rhymescontentwords_endmelisma[ix-1]: #previous rhymes
previous = ix
previousbeat = float(Fraction(seq['features']['beatinsong'][ix]))
rhyme_noteoffset.append(ix - previous)
rhyme_beatoffset.append(float(Fraction(seq['features']['beatinsong'][ix])) - previousbeat)
seq['features']['rhymescontentwords_noteoffset'] = rhyme_noteoffset
seq['features']['rhymescontentwords_beatoffset'] = rhyme_beatoffset
else:
#vocal features requested, but not present.
#skip melody
continue
#Or do this?
if False:
length = len(phrasepos)
seq['features']['rhymes_endmelisma'] = [None] * length
seq['features']['rhymescontentwords_endmelisma'] = [None] * length
seq['features']['wordend_endmelisma'] = [None] * length
seq['features']['noncontentword_endmelisma'] = [None] * length
seq['features']['wordstress_endmelisma'] = [None] * length
seq['features']['lyrics_endmelisma'] = [None] * length
seq['features']['phoneme_endmelisma'] = [None] * length
yield seq
class NoFeaturesError(Exception):
    def __init__(self, arg):
        self.arg = arg
class NoTrigramsError(Exception):
    def __init__(self, arg):
        self.arg = arg
    def __str__(self):
        return repr(self.arg)
#endix is index of last note + 1
def computeSumFractions(fractions, startix, endix):
res = 0.0
for fr in fractions[startix:endix]:
res = res + float(Fraction(fr))
return res
#make groups of indices with the same successive pitch, optionally not crossing phrase boundaries
#(note 2020-03-31: crossing phrase boundaries should be allowed, e.g. for contourfourth)
#returns tuples (ix of first note in group, ix of last note in group + 1)
#crossPhraseBreak=False splits on phrase breaks. N.B. this uses the ground-truth phrase annotation!
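#e.g. (hypothetical illustration): midipitch [60, 60, 62, 62, 64] with phrase_ix [0, 0, 0, 1, 1]
#yields [(0, 2), (2, 3), (3, 4), (4, 5)] with crossPhraseBreak=False, and [(0, 2), (2, 4), (4, 5)] with crossPhraseBreak=True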
def breakpitchlist(midipitch, phrase_ix, crossPhraseBreak=False):
res = []
if crossPhraseBreak:
for _, g in groupby( enumerate(midipitch), key=lambda x:x[1]):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
else: #N.B. This uses the ground truth
for _, g in groupby( enumerate(zip(midipitch,phrase_ix)), key=lambda x:(x[1][0],x[1][1])):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
return res
#True if no phrase end at first or second item (span) in the trigram
#trigram looks like ((8, 10), (10, 11), (11, 12))
def noPhraseBreak(tr, endOfPhrase):
return not ( ( True in endOfPhrase[tr[0][0]:tr[0][1]] ) or \
( True in endOfPhrase[tr[1][0]:tr[1][1]] ) )
#pgram_type : "pitch", "note"
def extractPgramsFromCorpus(corpus, pgram_type="pitch", startat=0, endat=None):
pgrams = {}
arfftype = {}
for ix, seq in enumerate(corpus):
if endat is not None:
if ix >= endat:
continue
if ix < startat:
continue
if not ix%100:
print(ix, end=' ')
songid = seq['id']
try:
pgrams[songid], arfftype_new = extractPgramsFromMelody(seq, pgram_type=pgram_type)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervalsize', typeconv=lambda x: abs(int(x)))
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervaldir', typeconv=np.sign)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'diatonicpitch', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'VosHarmony', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'beatstrength', typeconv=float)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'IOIbeatfraction', typeconv=float)
if 'melismastate' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'wordstress', typeconv=int)
if 'informationcontent' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'informationcontent', typeconv=float)
except NoFeaturesError:
print(songid, ": No features extracted.")
except NoTrigramsError:
print(songid, ": No trigrams extracted")
#if ix > startat:
# if arfftype.keys() != arfftype_new.keys():
# print("Warning: Melodies have different feature sets.")
# print(list(zip_longest(arfftype.keys(), arfftype_new.keys())))
#Keep largest set of features possible. N.B. no guarantee that all features in arfftype are in each sequence.
arfftype.update(arfftype_new)
#concat melodies
pgrams = pd.concat([v for v in pgrams.values()])
return pgrams, arfftype
def extractPgramsFromMelody(seq, pgram_type, skipPhraseCrossing=False):
# some aliases
scaledegree = seq['features']['scaledegree']
endOfPhrase = seq['features']['endOfPhrase']
midipitch = seq['features']['midipitch']
phrase_ix = seq['features']['phrase_ix']
if pgram_type == "pitch":
event_spans = breakpitchlist(midipitch, phrase_ix) #allow pitches to cross phrase break
elif pgram_type == "note":
event_spans = list(zip(range(len(scaledegree)),range(1,len(scaledegree)+1)))
else:
raise UnknownPGramType(pgram_type)
# make trigram of spans
event_spans = event_spans + [(None, None), (None, None)]
pgram_span_ixs = list(zip(event_spans,event_spans[1:],event_spans[2:],event_spans[3:],event_spans[4:]))
    # If skipPhraseCrossing, prune pgrams that cross phrase boundaries.
    # Why, actually? e.g. kindr154 has phrases of only 2 pitches.
if skipPhraseCrossing:
pgram_span_ixs = [ixs for ixs in pgram_span_ixs if noPhraseBreak(ixs,endOfPhrase)]
if len(pgram_span_ixs) == 0:
raise NoTrigramsError(seq['id'])
# create dataframe with pgram names as index
pgram_ids = [seq["id"]+'_'+str(ixs[0][0]).zfill(3) for ixs in pgram_span_ixs]
pgrams = pd.DataFrame(index=pgram_ids)
pgrams['ix0_0'] = pd.array([ix[0][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix0_1'] = pd.array([ix[0][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_0'] = pd.array([ix[1][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_1'] = pd.array([ix[1][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_0'] = pd.array([ix[2][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_1'] = pd.array([ix[2][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_0'] = pd.array([ix[3][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_1'] = pd.array([ix[3][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_0'] = pd.array([ix[4][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_1'] = pd.array([ix[4][1] for ix in pgram_span_ixs], dtype="Int16")
#add tune family ids and songids
pgrams['tunefamily'] = seq['tunefamily']
pgrams['songid'] = seq['id']
pgrams, arfftype = extractPgramFeatures(pgrams, seq)
return pgrams, arfftype
def getBeatDuration(timesig):
try:
dur = float(m21.meter.TimeSignature(timesig).beatDuration.quarterLength)
except TimeSignatureException:
dur = float(Fraction(timesig) / Fraction('1/4'))
return dur
def oneCrossRelation(el1, el2, typeconv):
if pd.isna(el1) or pd.isna(el2):
return np.nan
return '-' if typeconv(el2) < typeconv(el1) else '=' if typeconv(el1) == typeconv(el2) else '+'
def addCrossRelations(pgrams, arfftype, featurename, newname=None, typeconv=int):
postfixes = {
1 : 'first',
2 : 'second',
3 : 'third',
4 : 'fourth',
5 : 'fifth'
}
if newname is None:
newname = featurename
for ix1 in range(1,6):
for ix2 in range(ix1+1,6):
featname = newname + postfixes[ix1] + postfixes[ix2]
source = zip(pgrams[featurename + postfixes[ix1]], pgrams[featurename + postfixes[ix2]])
pgrams[featname] = [oneCrossRelation(el1, el2, typeconv) for (el1, el2) in source]
arfftype[featname] = '{-,=,+}'
return pgrams, arfftype
def extractPgramFeatures(pgrams, seq):
# vocal?
vocal = False
if 'melismastate' in seq['features'].keys():
vocal = True
arfftype = {}
# some aliases
scaledegree = seq['features']['scaledegree']
beatstrength = seq['features']['beatstrength']
diatonicpitch = seq['features']['diatonicpitch']
midipitch = seq['features']['midipitch']
chromaticinterval = seq['features']['chromaticinterval']
timesig = seq['features']['timesignature']
metriccontour = seq['features']['metriccontour']
beatinsong = seq['features']['beatinsong']
beatinphrase = seq['features']['beatinphrase']
endOfPhrase = seq['features']['endOfPhrase']
phrasestart_ix = seq['features']['phrasestart_ix']
phrase_ix = seq['features']['phrase_ix']
completesmeasure_song = seq['features']['completesmeasure_song']
completesbeat_song = seq['features']['completesbeat_song']
completesmeasure_phrase = seq['features']['completesmeasure_phrase']
completesbeat_phrase = seq['features']['completesbeat_phrase']
IOIbeatfraction = seq['features']['IOI_beatfraction']
nextisrest = seq['features']['nextisrest']
gpr2a = seq['features']['gpr2a_Frankland']
gpr2b = seq['features']['gpr2b_Frankland']
gpr3a = seq['features']['gpr3a_Frankland']
gpr3d = seq['features']['gpr3d_Frankland']
gprsum = seq['features']['gpr_Frankland_sum']
pprox = seq['features']['pitchproximity']
prev = seq['features']['pitchreversal']
lbdmpitch = seq['features']['lbdm_spitch']
lbdmioi = seq['features']['lbdm_sioi']
lbdmrest = seq['features']['lbdm_srest']
lbdm = seq['features']['lbdm_boundarystrength']
if vocal:
wordstress = seq['features']['wordstress_endmelisma']
noncontentword = seq['features']['noncontentword_endmelisma']
wordend = seq['features']['wordend_endmelisma']
rhymescontentwords = seq['features']['rhymescontentwords_endmelisma']
rhymescontentwords_noteoffset = seq['features']['rhymescontentwords_noteoffset']
rhymescontentwords_beatoffset = seq['features']['rhymescontentwords_beatoffset']
melismastate = seq['features']['melismastate']
phrase_count = max(phrase_ix) + 1
pgrams['scaledegreefirst'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['scaledegreesecond'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['scaledegreethird'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['scaledegreefourth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['scaledegreefifth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['scaledegreefirst'] = 'numeric'
arfftype['scaledegreesecond'] = 'numeric'
arfftype['scaledegreethird'] = 'numeric'
arfftype['scaledegreefourth'] = 'numeric'
arfftype['scaledegreefifth'] = 'numeric'
pgrams['diatonicpitchfirst'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['diatonicpitchsecond'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['diatonicpitchthird'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['diatonicpitchfourth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['diatonicpitchfifth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['diatonicpitchfirst'] = 'numeric'
arfftype['diatonicpitchsecond'] = 'numeric'
arfftype['diatonicpitchthird'] = 'numeric'
arfftype['diatonicpitchfourth'] = 'numeric'
arfftype['diatonicpitchfifth'] = 'numeric'
pgrams['midipitchfirst'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['midipitchsecond'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['midipitchthird'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['midipitchfourth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['midipitchfifth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['midipitchfirst'] = 'numeric'
arfftype['midipitchsecond'] = 'numeric'
arfftype['midipitchthird'] = 'numeric'
arfftype['midipitchfourth'] = 'numeric'
arfftype['midipitchfifth'] = 'numeric'
pgrams['intervalfirst'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['intervalsecond'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['intervalthird'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['intervalfourth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['intervalfifth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['intervalfirst'] = 'numeric'
arfftype['intervalsecond'] = 'numeric'
arfftype['intervalthird'] = 'numeric'
arfftype['intervalfourth'] = 'numeric'
arfftype['intervalfifth'] = 'numeric'
parsons = {-1:'-', 0:'=', 1:'+'}
    #intervalcontour is not a good feature; pitchcontour would be better. It will be covered by the cross-relations.
#pgrams['intervalcontoursecond'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int1) else np.nan for int1, int2 in \
# zip(pgrams['intervalfirst'],pgrams['intervalsecond'])]
#pgrams['intervalcontourthird'] = [parsons[np.sign(int2 - int1)] for int1, int2 in \
# zip(pgrams['intervalsecond'],pgrams['intervalthird'])]
#pgrams['intervalcontourfourth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalthird'],pgrams['intervalfourth'])]
#pgrams['intervalcontourfifth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalfourth'],pgrams['intervalfifth'])]
#arfftype['intervalcontoursecond'] = '{-,=,+}'
#arfftype['intervalcontourthird'] = '{-,=,+}'
#arfftype['intervalcontourfourth'] = '{-,=,+}'
#arfftype['intervalcontourfifth'] = '{-,=,+}'
    #intervals for which the second tone carries the center of gravity according to Vos (2002), plus octave equivalents
VosCenterGravityASC = np.array([1, 5, 8])
VosCenterGravityDESC = np.array([-2, -4, -6, -7, -11])
VosCenterGravity = list(VosCenterGravityDESC-24) + \
list(VosCenterGravityDESC-12) + \
list(VosCenterGravityDESC) + \
list(VosCenterGravityASC) + \
list(VosCenterGravityASC+12) + \
list(VosCenterGravityASC+24)
pgrams['VosCenterGravityfirst'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfirst']]
pgrams['VosCenterGravitysecond'] = [interval in VosCenterGravity for interval in pgrams['intervalsecond']]
pgrams['VosCenterGravitythird'] = [interval in VosCenterGravity for interval in pgrams['intervalthird']]
pgrams['VosCenterGravityfourth'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfourth']]
pgrams['VosCenterGravityfifth'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfifth']]
arfftype['VosCenterGravityfirst'] = '{True, False}'
arfftype['VosCenterGravitysecond'] = '{True, False}'
arfftype['VosCenterGravitythird'] = '{True, False}'
arfftype['VosCenterGravityfourth'] = '{True, False}'
arfftype['VosCenterGravityfifth'] = '{True, False}'
VosHarmony = {
0: 0,
1: 2,
2: 3,
3: 4,
4: 5,
5: 6,
6: 1,
7: 6,
8: 5,
9: 4,
10: 3,
11: 2,
12: 7
}
#interval modulo one octave, but 0 only for absolute unison (Vos 2002, p.633)
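    # e.g. (hypothetical illustration): vosint([0, 7, 12, -13]) -> [0, 7, 12, 1]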
def vosint(intervals):
return [((np.sign(i)*i-1)%12+1 if i!=0 else 0) if not pd.isna(i) else np.nan for i in intervals]
pgrams['VosHarmonyfirst'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfirst'])], dtype="Int16")
pgrams['VosHarmonysecond'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalsecond'])], dtype="Int16")
pgrams['VosHarmonythird'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalthird'])], dtype="Int16")
pgrams['VosHarmonyfourth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfourth'])], dtype="Int16")
pgrams['VosHarmonyfifth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfifth'])], dtype="Int16")
arfftype['VosHarmonyfirst'] = 'numeric'
arfftype['VosHarmonysecond'] = 'numeric'
arfftype['VosHarmonythird'] = 'numeric'
arfftype['VosHarmonyfourth'] = 'numeric'
arfftype['VosHarmonyfifth'] = 'numeric'
if 'informationcontent' in seq['features'].keys():
informationcontent = seq['features']['informationcontent']
pgrams['informationcontentfirst'] = [informationcontent[int(ix)] for ix in pgrams['ix0_0']]
pgrams['informationcontentsecond'] = [informationcontent[int(ix)] for ix in pgrams['ix1_0']]
pgrams['informationcontentthird'] = [informationcontent[int(ix)] for ix in pgrams['ix2_0']]
pgrams['informationcontentfourth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['informationcontentfifth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['informationcontentfirst'] = 'numeric'
arfftype['informationcontentsecond'] = 'numeric'
arfftype['informationcontentthird'] = 'numeric'
arfftype['informationcontentfourth'] = 'numeric'
arfftype['informationcontentfifth'] = 'numeric'
pgrams['contourfirst'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfirst']]
pgrams['contoursecond'] = [parsons[np.sign(i)] for i in pgrams['intervalsecond']]
pgrams['contourthird'] = [parsons[np.sign(i)] for i in pgrams['intervalthird']]
pgrams['contourfourth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfourth']]
pgrams['contourfifth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfifth']]
arfftype['contourfirst'] = '{-,=,+}'
arfftype['contoursecond'] = '{-,=,+}'
arfftype['contourthird'] = '{-,=,+}'
arfftype['contourfourth'] = '{-,=,+}'
arfftype['contourfifth'] = '{-,=,+}'
    ###########################################
#derived features from Interval and Contour
pgrams['registraldirectionchange'] = [cont_sec != cont_third for cont_sec, cont_third in \
zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['registraldirectionchange'] = '{True, False}'
pgrams['largetosmall'] = [int_first >= 6 and int_second <=4 for int_first, int_second in \
zip(pgrams['intervalsecond'], pgrams['intervalthird'])]
arfftype['largetosmall'] = '{True, False}'
pgrams['contourreversal'] = [(i[0] == '-' and i[1] == '+') or (i[0]=='+' and i[1]=='-') \
for i in zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['contourreversal'] = '{True, False}'
pgrams['isascending'] = \
(pgrams['diatonicpitchfirst'] < pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] < pgrams['diatonicpitchthird'])
arfftype['isascending'] = '{True, False}'
pgrams['isdescending'] = \
(pgrams['diatonicpitchfirst'] > pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] > pgrams['diatonicpitchthird'])
arfftype['isdescending'] = '{True, False}'
diat = pgrams[['diatonicpitchfirst','diatonicpitchsecond','diatonicpitchthird']].values
pgrams['ambitus'] = diat.max(1) - diat.min(1)
arfftype['ambitus'] = 'numeric'
pgrams['containsleap'] = \
(abs(pgrams['diatonicpitchsecond'] - pgrams['diatonicpitchfirst']) > 1) | \
(abs(pgrams['diatonicpitchthird'] - pgrams['diatonicpitchsecond']) > 1)
arfftype['containsleap'] = '{True, False}'
    ###########################################
pgrams['numberofnotesfirst'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix0_0'],pgrams['ix0_1'])], dtype="Int16")
pgrams['numberofnotessecond'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix1_0'],pgrams['ix1_1'])], dtype="Int16")
pgrams['numberofnotesthird'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix2_0'],pgrams['ix2_1'])], dtype="Int16")
pgrams['numberofnotesfourth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix3_0'],pgrams['ix3_1'])], dtype="Int16")
pgrams['numberofnotesfifth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix4_0'],pgrams['ix4_1'])], dtype="Int16")
arfftype['numberofnotesfirst'] = 'numeric'
arfftype['numberofnotessecond'] = 'numeric'
arfftype['numberofnotesthird'] = 'numeric'
arfftype['numberofnotesfourth'] = 'numeric'
arfftype['numberofnotesfifth'] = 'numeric'
if seq['freemeter']:
pgrams['meternumerator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenominator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
else:
pgrams['meternumerator'] = pd.array([int(timesig[ix].split('/')[0]) for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenominator'] = pd.array([int(timesig[ix].split('/')[1]) for ix in pgrams['ix0_0']], dtype="Int16")
arfftype['meternumerator'] = 'numeric'
arfftype['meterdenominator'] = 'numeric'
pgrams['nextisrestfirst'] = [nextisrest[ix-1] for ix in pgrams['ix0_1']]
pgrams['nextisrestsecond'] = [nextisrest[ix-1] for ix in pgrams['ix1_1']]
pgrams['nextisrestthird'] = [nextisrest[ix-1] for ix in pgrams['ix2_1']]
pgrams['nextisrestfourth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['nextisrestfifth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['nextisrestfirst'] = '{True, False}'
arfftype['nextisrestsecond'] = '{True, False}'
arfftype['nextisrestthird'] = '{True, False}'
arfftype['nextisrestfourth'] = '{True, False}'
arfftype['nextisrestfifth'] = '{True, False}'
pgrams['beatstrengthfirst'] = [beatstrength[int(ix)] for ix in pgrams['ix0_0']]
pgrams['beatstrengthsecond'] = [beatstrength[int(ix)] for ix in pgrams['ix1_0']]
pgrams['beatstrengththird'] = [beatstrength[int(ix)] for ix in pgrams['ix2_0']]
pgrams['beatstrengthfourth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['beatstrengthfifth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['beatstrengthfirst'] = 'numeric'
arfftype['beatstrengthsecond'] = 'numeric'
arfftype['beatstrengththird'] = 'numeric'
arfftype['beatstrengthfourth'] = 'numeric'
arfftype['beatstrengthfifth'] = 'numeric'
#these will be in crossrelations: beatstrengthfirstsecond, etc.
#pgrams['metriccontourfirst'] = [metriccontour[int(ix)] for ix in pgrams['ix0_0']]
#pgrams['metriccontoursecond'] = [metriccontour[int(ix)] for ix in pgrams['ix1_0']]
#pgrams['metriccontourthird'] = [metriccontour[int(ix)] for ix in pgrams['ix2_0']]
#pgrams['metriccontourfourth'] = [metriccontour[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
#pgrams['metriccontourfifth'] = [metriccontour[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
#arfftype['metriccontourfirst'] = '{-,=,+}'
#arfftype['metriccontoursecond'] = '{-,=,+}'
#arfftype['metriccontourthird'] = '{-,=,+}'
#arfftype['metriccontourfourth'] = '{-,=,+}'
#arfftype['metriccontourfifth'] = '{-,=,+}'
pgrams['IOIbeatfractionfirst'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix0_0'],pgrams['ix0_1'])]
pgrams['IOIbeatfractionsecond'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix1_0'],pgrams['ix1_1'])]
pgrams['IOIbeatfractionthird'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix2_0'],pgrams['ix2_1'])]
pgrams['IOIbeatfractionfourth'] = [computeSumFractions(IOIbeatfraction, startix, endix) if not pd.isna(startix) else np.nan for \
startix, endix in zip(pgrams['ix3_0'],pgrams['ix3_1'])]
pgrams['IOIbeatfractionfifth'] = [computeSumFractions(IOIbeatfraction, startix, endix) if not pd.isna(startix) else np.nan for \
startix, endix in zip(pgrams['ix4_0'],pgrams['ix4_1'])]
arfftype['IOIbeatfractionfirst'] = 'numeric'
arfftype['IOIbeatfractionsecond'] = 'numeric'
arfftype['IOIbeatfractionthird'] = 'numeric'
arfftype['IOIbeatfractionfourth'] = 'numeric'
arfftype['IOIbeatfractionfifth'] = 'numeric'
pgrams['durationcummulation'] = [((d2 > d1) and (d3 > d2)) for d1, d2, d3 in \
zip(pgrams['IOIbeatfractionfirst'],pgrams['IOIbeatfractionsecond'],pgrams['IOIbeatfractionthird'])]
arfftype['durationcummulation'] = '{True, False}'
#these will be in crossrelation: IOIbeatfractionfirstsecond, etc.
#pgrams['durationcontoursecond'] = [parsons[np.sign(dur2 - dur1)] for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionfirst'],pgrams['IOIbeatfractionsecond'])]
#pgrams['durationcontourthird'] = [parsons[np.sign(dur2 - dur1)] for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionsecond'],pgrams['IOIbeatfractionthird'])]
#pgrams['durationcontourfourth'] = [parsons[np.sign(dur2 - dur1)] if not pd.isna(dur2) else np.nan for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionthird'],pgrams['IOIbeatfractionfourth'])]
#pgrams['durationcontourfifth'] = [parsons[np.sign(dur2 - dur1)] if not pd.isna(dur2) else np.nan for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionfourth'],pgrams['IOIbeatfractionfifth'])]
#arfftype['durationcontoursecond'] = '{-,=,+}'
#arfftype['durationcontourthird'] = '{-,=,+}'
#arfftype['durationcontourfourth'] = '{-,=,+}'
#arfftype['durationcontourfifth'] = '{-,=,+}'
pgrams['onthebeatfirst'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix0_0']]
pgrams['onthebeatsecond'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix1_0']]
pgrams['onthebeatthird'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix2_0']]
pgrams['onthebeatfourth'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['onthebeatfifth'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['onthebeatfirst'] = '{True, False}'
arfftype['onthebeatsecond'] = '{True, False}'
arfftype['onthebeatthird'] = '{True, False}'
arfftype['onthebeatfourth'] = '{True, False}'
arfftype['onthebeatfifth'] = '{True, False}'
pgrams['completesmeasurephrase'] = [completesmeasure_phrase[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesmeasuresong'] = [completesmeasure_song[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesbeatphrase'] = [completesbeat_phrase[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesbeatsong'] = [completesbeat_song[ix-1] for ix in pgrams['ix2_1']]
arfftype['completesmeasurephrase'] = '{True, False}'
arfftype['completesmeasuresong'] = '{True, False}'
arfftype['completesbeatphrase'] = '{True, False}'
arfftype['completesbeatsong'] = '{True, False}'
if 'grouper' in seq['features'].keys():
grouper = seq['features']['grouper']
pgrams['grouperfirst'] = [grouper[int(ix)] for ix in pgrams['ix0_0']]
pgrams['groupersecond'] = [grouper[int(ix)] for ix in pgrams['ix1_0']]
pgrams['grouperthird'] = [grouper[int(ix)] for ix in pgrams['ix2_0']]
pgrams['grouperfourth'] = [grouper[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['grouperfifth'] = [grouper[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['grouperfirst'] = '{True, False}'
arfftype['groupersecond'] = '{True, False}'
arfftype['grouperthird'] = '{True, False}'
arfftype['grouperfourth'] = '{True, False}'
arfftype['grouperfifth'] = '{True, False}'
#values for final note of third group
pgrams['noteoffset'] = pd.array([(ix-1) - phrasestart_ix[(ix-1)] for ix in pgrams['ix2_1']], dtype="Int16")
pgrams['beatoffset'] = [float(Fraction(beatinphrase[ix-1])) - \
float(Fraction(beatinphrase[phrasestart_ix[(ix-1)]])) \
for ix in pgrams['ix2_1']]
arfftype['noteoffset'] = 'numeric'
arfftype['beatoffset'] = 'numeric'
pgrams['beatduration'] = [getBeatDuration(timesig[int(ix)]) for ix in pgrams['ix0_0']]
pgrams['beatcount'] = pd.array([m21.meter.TimeSignature(timesig[int(ix)]).beatCount for ix in pgrams['ix0_0']], dtype="Int16")
arfftype['beatduration'] = 'numeric'
arfftype['beatcount'] = 'numeric'
#get values for the last note!
pgrams['gpr2afirst'] = [gpr2a[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr2asecond'] = [gpr2a[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr2athird'] = [gpr2a[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr2afourth'] = [gpr2a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr2afifth'] = [gpr2a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr2afirst'] = 'numeric'
arfftype['gpr2asecond'] = 'numeric'
arfftype['gpr2athird'] = 'numeric'
arfftype['gpr2afourth'] = 'numeric'
arfftype['gpr2afifth'] = 'numeric'
pgrams['gpr2bfirst'] = [gpr2b[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr2bsecond'] = [gpr2b[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr2bthird'] = [gpr2b[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr2bfourth'] = [gpr2b[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr2bfifth'] = [gpr2b[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr2bfirst'] = 'numeric'
arfftype['gpr2bsecond'] = 'numeric'
arfftype['gpr2bthird'] = 'numeric'
arfftype['gpr2bfourth'] = 'numeric'
arfftype['gpr2bfifth'] = 'numeric'
pgrams['gpr3afirst'] = [gpr3a[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr3asecond'] = [gpr3a[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr3athird'] = [gpr3a[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr3afourth'] = [gpr3a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr3afifth'] = [gpr3a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr3afirst'] = 'numeric'
arfftype['gpr3asecond'] = 'numeric'
arfftype['gpr3athird'] = 'numeric'
arfftype['gpr3afourth'] = 'numeric'
arfftype['gpr3afifth'] = 'numeric'
pgrams['gpr3dfirst'] = [gpr3d[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr3dsecond'] = [gpr3d[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr3dthird'] = [gpr3d[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr3dfourth'] = [gpr3d[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr3dfifth'] = [gpr3d[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr3dfirst'] = 'numeric'
arfftype['gpr3dsecond'] = 'numeric'
arfftype['gpr3dthird'] = 'numeric'
arfftype['gpr3dfourth'] = 'numeric'
arfftype['gpr3dfifth'] = 'numeric'
pgrams['gprsumfirst'] = [gprsum[ix-1] for ix in pgrams['ix0_1']]
pgrams['gprsumsecond'] = [gprsum[ix-1] for ix in pgrams['ix1_1']]
pgrams['gprsumthird'] = [gprsum[ix-1] for ix in pgrams['ix2_1']]
pgrams['gprsumfourth'] = [gprsum[ix-1] if not
|
pd.isna(ix)
|
pandas.isna
|
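A minimal runnable sketch of the pd.isna guard the completion above relies on; the gprsum values and the nullable index column below are invented stand-ins, not the original pgram data.

import numpy as np
import pandas as pd

# toy stand-ins: per-note GPR sums and nullable start indices of the fourth group
gprsum = [0.5, 1.0, 1.5, 2.0]
ix3_1 = pd.array([2, 4, pd.NA], dtype="Int16")  # last pgram has no fourth group

# pd.isna is True for None, np.nan and pd.NA alike, so missing indices are skipped
gprsumfourth = [gprsum[ix - 1] if not pd.isna(ix) else np.nan for ix in ix3_1]
print(gprsumfourth)  # [1.0, 2.0, nan]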
import os, glob, sys
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import re
def load_data(path):
"""Load training and testing datasets based on their path
Parameters
----------
    path : relative path to the location of the data, should always be the same (string)
Returns
-------
Training and testing Dataframes
"""
train = pd.read_csv(os.path.join(path,'train.csv'))
test = pd.read_csv(os.path.join(path,'test.csv'))
return train, test
def modify_fare(df, n: int = 4):
"""Introduce n new intervals (based on quantiles) for the feature fare, such that it is modified from
being continuous to being discrete
Parameters
----------
    df : pandas dataframe
n: number of new intervals (int)
Returns
-------
Original dataframe with discretized version of the feature 'Fare', categories
"""
df['Fare'] = df['Fare'].fillna(df['Fare'].median())
df['Fare'] = pd.qcut(df['Fare'], n, labels = list(string.ascii_uppercase)[:n])
return df
def get_size_family(df, mod: bool = False):
"""Defines family relations based on the features 'SibSp' (the # of siblings / spouses aboard the Titanic)
and 'Parch' (the # of parents / children aboard the Titanic)
Parameters
----------
    df : pandas dataframe
Returns
-------
Original dataframe with a new feature called 'FamilySize'
"""
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
if mod:
bins_ = [0,1,2,12]
df['FamilySize'] = pd.cut(df["FamilySize"], bins = bins_, labels = list(string.ascii_uppercase)[:len(bins_)-1])
return df
def get_title(name):
"""Search for individual title in a string by considering it to have a ASCII format from A-Z
Parameters
----------
name : The name from which a title wants to be extracted (string)
Returns
-------
String associated to a found title
"""
    title_search = re.search(r' ([A-Za-z]+)\.', name)
# If the title exists, extract and return it.
if title_search:
return title_search.group(1)
return ""
def get_titles(df, mod: bool = True):
"""Search for all titles inside a dataframe, given the feature 'Name'
Parameters
----------
    df : pandas dataframe
    mod : simplify the extent of titles available (boolean)
Returns
-------
Original dataframe with a new feature called 'Title'
"""
df['Title'] = df['Name'].apply(get_title)
if mod:
# perform modifications
df['Title'] = df['Title'].replace('Mlle', 'Miss')
df['Title'] = df['Title'].replace('Ms', 'Miss')
df['Title'] = df['Title'].replace('Mme', 'Mrs')
return df
def get_all_ages(df, n: int = 5):
"""Fills in empty Ages based on the Title of a person, and then introduces n intervals for the feature 'Ages',
such that it is modified from being continuous to be discrete
Parameters
----------
    df : pandas dataframe
n: number of new intervals (int)
Returns
-------
Discretized version of the feature 'Age', categories
"""
emb = []
for i, row in df.iterrows():
if pd.isnull(row['Age']):
title = row['Title']
age_avg = df['Age'][df['Title'] == title].mean()
age_std = df['Age'][df['Title'] == title].std()
emb.append(np.random.randint(age_avg - age_std, age_avg + age_std, size=1)[0])
else:
emb.append(row['Age'])
# Update column
df['Age'] = emb
    # Discretize the Age column into n labelled categories
df["Age"] = pd.cut(df["Age"], n, labels = list(string.ascii_uppercase)[:n])
return df
def get_age2(df):
"""Fills in empty Ages based on the Title of a person. DR
Parameters
----------
    df : pandas dataframe
Returns
-------
Dataframe with missing values for age filled.
"""
ages_mean = df[['Title', 'Age']].groupby(['Title'],
as_index=False).mean().set_index('Title').rename(columns={'Age': 'mean'})
ages_std = df[['Title', 'Age']].groupby(['Title'], as_index=False).std().set_index('Title').rename(columns={'Age': 'std'})
ages_title = pd.merge(ages_mean,ages_std, how='inner', left_index=True, right_index=True)
age = []
for i, Port in df.iterrows():
if pd.isnull(Port['Age']):
age.append(np.random.normal(ages_title.loc[Port['Title'],'mean'],ages_title.loc[Port['Title'],'std']))
else:
age.append(Port['Age'])
# Update column
df['Age'] = age
return df
def get_age_group(df,n: int=10):
"""Assigns a category to the age DR
Parameters
----------
df : Dataframe
n : number of categories
Returns
-------
Dataset with Age_group column
"""
df["Age_group"] = pd.cut(df["Age"], n, labels = list(string.ascii_uppercase)[:n])
return df
def modify_titles(df):
"""Concatenates titles found to be similar or considered to be simplified in one category
Parameters
----------
    df : pandas dataframe
Returns
-------
Simplified categories in the features 'Title'
"""
    # join less representative categories
df['Title'] = df['Title'].replace(['Lady', 'Countess',
'Capt', 'Col', 'Don', 'Dr', 'Major',
'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
return df
def get_deck(name):
"""Search for individual Capital letter inside a string associated to the cabin of a person, from A-Z
Parameters
----------
name : The name from which a deck wants to be extracted (string)
Returns
-------
    Letter associated with the deck that a person was on
"""
if
|
pd.isnull(name)
|
pandas.isnull
|
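A small hedged sketch of the pd.isnull scalar check that completes get_deck above; the helper name, fallback value and deck extraction are assumptions, and the cabin strings are made up.

import numpy as np
import pandas as pd

def deck_or_unknown(cabin):
    # missing cabins arrive as NaN floats, so test with pd.isnull before slicing
    if pd.isnull(cabin):
        return "Unknown"
    return cabin[0]  # the leading capital letter is the deck

print(deck_or_unknown("C85"))   # C
print(deck_or_unknown(np.nan))  # Unknown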
import webbrowser
import numpy as np
import pandas as pd
import tax_utils as tut
from tax_calculator import TaxCalculator
class NorwegianTax(TaxCalculator):
"""
to facilitate easy input
add random text to trigger a code push...
"""
def __init__(self, salary=0, birth_year=1978, tax_year=None, gains_from_sale_fondskonto_share_comp=0, gains_from_sale_fondskonto_interest_comp=0, gains_from_sale_of_shares_ask=0, property_taxable_value=0, pension=0, pension_months=12, pension_percent=100, property_sale_proceeds=0, rental_income=0, property_sale_loss=0, bank_deposits=0,
bank_interest_income=0, interest_expenses=0, dividends=0, mutual_fund_dividends=0, gains_from_sale_of_shares=0, mutual_fund_interest_comp_profit=0, mutual_fund_interest_comp_profit_combi_fund=0, mutual_fund_share_comp_profit=0, mutual_fund_share_comp_profit_combi_fund=0, loss_fondskonto_shares=0, loss_fondskonto_interest=0, loss_ask_sale=0,
loss_from_sale_of_shares=0, loss_from_sale_mutual_fund_share_comp=0, loss_from_sale_mutual_fund_share_comp_combi_fund=0, loss_from_sale_mutual_fund_interest_comp=0,
loss_from_sale_mutual_fund_interest_comp_combi_fund=0, mutual_fund_wealth_share_comp=0, mutual_fund_wealth_interest_comp=0, wealth_in_shares=0, wealth_in_unlisted_shares=0, wealth_ask_cash=0, wealth_ask_shares=0, wealth_fondskonto_cash_interest=0, wealth_fondskonto_shares=0, municipality='0402', case_idx=None):
self._salary = salary
self._birth_year = birth_year
if tax_year is None:
tax_year =
|
pd.to_datetime('today')
|
pandas.to_datetime
|
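A hedged sketch of the pd.to_datetime('today') default that completes the constructor above; the helper below is a simplification, not the real NorwegianTax API.

import pandas as pd

def default_tax_year(tax_year=None):
    # pd.to_datetime('today') returns a Timestamp for the current moment,
    # so its .year is a reasonable default when no tax year is given
    if tax_year is None:
        tax_year = pd.to_datetime('today').year
    return tax_year

print(default_tax_year())      # e.g. 2024
print(default_tax_year(2021))  # 2021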
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                    # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                    # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
        # similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
        # gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected =
|
Series([1], index=['two'])
|
pandas.Series
|
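A minimal sketch of the pd.Series construction pattern these drop tests rely on, written against the modern pandas testing API rather than the legacy test harness above.

import pandas as pd
from pandas.testing import assert_series_equal

s = pd.Series([1, 1, 2], index=['one', 'two', 'one'])

# dropping a label keeps the remaining (possibly duplicated) labels in order
result = s.drop('two')
expected = pd.Series([1, 2], index=['one', 'one'])
assert_series_equal(result, expected)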
import numpy as np
import pandas as pd
import xarray as xr
from functools import reduce
from operator import mul
import sys
sys.path.append('./..')
from utils import stats
from paths_nz import *
## Functions
def read_NZprod():
'''
function for reading production data from csv
read files per month separately and join
'''
years = range(1997,2020)
months = range(1,13)
for year in years:
for month in months:
try:
prod_NZm = pd.read_csv(nz_path + "/generation/"+str(year)+f"{month:02d}"+"_Generation_MD.csv")
except:
continue
if("prod_NZ" in locals()):
prod_NZ = pd.concat([prod_NZ,prod_NZm[prod_NZm.Fuel_Code=="Wind"]])
else:
prod_NZ = prod_NZm[prod_NZm.Fuel_Code=="Wind"]
return(prod_NZ)
def prep_gen(loc,prod_NZts):
'''
    helper function for preparing production data
selects data of one wind park loc
transforms the trading periods to datetime format
and aggregates data hourly
'''
prod_loc = prod_NZts[prod_NZts.POC_Gen==loc].drop(['POC_Gen','POC_Code'],axis=1)
ind = pd.date_range(start=pd.to_datetime(prod_loc.Trading_date.values[0]),
freq='H',
periods=len(prod_loc.TP)/2,
tz='NZ').tz_convert('UTC').repeat(2)
prod_loc = prod_loc.set_index(ind)
prod_loch = prod_loc.prod_kW.resample('H').sum()
return(prod_loch)
def rm_constTS(wpt,lim=120):
'''
    function for removing constant parts of a time series
    all constant runs longer than lim hours (default: 120 hours,
    because the longest run of zero generation in the simulated data was ca. 120 h)
    are removed from the dataset (see the usage sketch after this function)
'''
wpt1 = wpt.copy(deep=True)
wpt1.index = wpt.index - np.timedelta64(1,'h')
# starts of constant timeseries
s = np.where((((wpt-wpt1).values[1:]==0).astype(int)-
((wpt-wpt1).values[:-1]==0).astype(int))==1)[0]
# ends of constant timeseries
e = np.where((((wpt-wpt1).values[1:]==0).astype(int)-
((wpt-wpt1).values[:-1]==0).astype(int))==-1)[0]
    # keep only starts and ends of constant runs that are longer than lim hours
sd = s[np.where((e-s)>lim)[0]]
ed = e[np.where((e-s)>lim)[0]]
rmdf = pd.Series(0,index=wpt.index)
for i in range(len(sd)):
rmdf.iloc[sd[i]:ed[i]] = 1
return(wpt.where(rmdf==0))
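# Hedged usage sketch for rm_constTS (toy data, not the real NZ generation series):
# a 200-hour flat stretch exceeds the default lim of 120 hours and is masked to NaN,
# while the variable parts of the series are kept.
_idx = pd.date_range('2019-01-01', periods=400, freq='H', tz='UTC')
_toy = pd.Series(np.r_[np.random.rand(100), np.zeros(200), np.random.rand(100)], index=_idx)
print(rm_constTS(_toy).isna().sum())  # roughly 200 masked hours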
def tidy_prod(prod_NZ):
# bring from wide to tidy format
prod_NZt = pd.melt(prod_NZ,
id_vars = ['POC_Code','Gen_Code','Trading_date'],
value_vars = ['TP' + str(i) for i in range(1,51)],
value_name="prod_kW").dropna()
# extract number of trading period and sort
prod_NZt['TP'] = [int(tp[2:]) for tp in prod_NZt.variable.values]
prod_NZts = prod_NZt.sort_values(by=['POC_Code','Gen_Code','Trading_date','TP'])
# separate data by datetimeindex and location
# as neither POC Codes (te rere hau & twf 3) nor Gen Codes (twf 12) are unique - combine to get unique values per location
prod_NZts['POC_Gen'] = prod_NZts.Gen_Code + prod_NZts.POC_Code
prod_NZ = pd.Series(prod_NZts.POC_Gen.unique()).apply(prep_gen,args=(prod_NZts,)).transpose()
prod_NZ.columns = prod_NZts.POC_Gen.unique()
# sum up both west wind
prod_NZ['west_wind'] = prod_NZ.west_windWWD1102 + prod_NZ.west_windWWD1103
prod_NZ = prod_NZ.drop(['west_windWWD1102','west_windWWD1103'],axis=1)
# associate wind park names
    # prod_NZ[1] probably is twf 2 because it has mostly higher production than prod_NZ[0], which has lower installed capacity
gen_wps = [n[:-7] for n in prod_NZ.columns]
gen_wps[0] = "twf_1"
gen_wps[1] = "twf_2"
gen_wps[7] = "west_wind"
prod_NZ.columns = gen_wps
# remove constant parts of time series longer than one day
prod_NZh = prod_NZ.apply(rm_constTS,axis=0)
return(prod_NZh)
def load_results(dataset,gwa,parks):
'''
function for loading simulation results
dataset is either MERRA2 or ERA5
gwa is none, GWA2 or GWA3
parks are the parkname codes
'''
if gwa == 'GWA2':
rpath = results_path + '/results_GWA2/'
else:
rpath = results_path + '/'
if gwa == 'none':
file = 'windpower_NZ_'+dataset+'.nc'
else:
file = 'windpower_NZ_'+dataset+'_GWA.nc'
NZ = xr.open_dataset(rpath + file).wp.to_dataframe().reset_index().set_index(['time','location']).unstack()
# adapt datetime index of MERRA data (some hours are shifted by 30 min)
if dataset == 'MERRA2':
NZ.index.values[NZ.index.minute!=0] = NZ.index.values[NZ.index.minute!=0] - np.timedelta64(30,'m')
# sum up <NAME> and adapt names to generation data
NZ = NZ.groupby(parks,axis=1).sum(axis=1).tz_localize('UTC')
return(NZ)
def get_cap_df(cap,comdate):
'''
function for getting hourly capacities
cap is numpy array of installed capacities
comdate is numpy array of commissioning dates in datetime format
'''
com =
|
pd.DataFrame({'capacity': cap})
|
pandas.DataFrame
|
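get_cap_df is cut off above, so the following is only a guess at the intended pattern: wrap the capacity array in a pd.DataFrame keyed by commissioning date and carry cumulative installed capacity forward at hourly resolution (all numbers invented).

import numpy as np
import pandas as pd

cap = np.array([10.0, 25.0, 40.0])                                    # kW, invented
comdate = pd.to_datetime(['2000-05-01', '2005-11-01', '2012-03-01'])  # invented dates

com = pd.DataFrame({'capacity': cap}, index=comdate)
# cumulative capacity at each commissioning date, forward-filled to hourly resolution
cap_hourly = com['capacity'].cumsum().resample('H').ffill()
print(cap_hourly.head())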
import json
import os
import random
import idx2numpy
import numpy as np
import pandas as pd
import typer
from tqdm import tqdm
np.random.seed(42)
random.seed(420)
def main(viz: bool = False):
X_train = idx2numpy.convert_from_file("data/mnist/train-images-idx3-ubyte")
y_train = idx2numpy.convert_from_file("data/mnist/train-labels-idx1-ubyte")
X_test = idx2numpy.convert_from_file("data/mnist/t10k-images-idx3-ubyte")
y_test = idx2numpy.convert_from_file("data/mnist/t10k-labels-idx1-ubyte")
X_train = to_sparse(X_train)
X_test = to_sparse(X_test)
max_length = max(x.shape[0] for x in X_train + X_test)
X_train = shuffle_and_pad(X_train, max_length, "train")
X_train = X_train.reshape(X_train.shape[0], -1)
X_test = shuffle_and_pad(X_test, max_length, "test")
X_test = X_test.reshape(X_test.shape[0], -1)
data_train = np.concatenate([y_train[:, None], X_train], axis=-1)
data_test = np.concatenate([y_test[:, None], X_test], axis=-1)
columns = [[f"x{i}", f"y{i}", f"v{i}"] for i in range(max_length)]
columns = ["label"] + sum(columns, [])
df_train =
|
pd.DataFrame(data_train, columns=columns)
|
pandas.DataFrame
|
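A short hedged example of the pd.DataFrame(data, columns=...) call that completes the MNIST snippet above, using a tiny fake array instead of the real sparse digit data.

import numpy as np
import pandas as pd

# fake rows: a label followed by (x, y, v) triples for two "pixels"
data_train = np.array([
    [7, 1, 2, 255, 3, 4, 128],
    [3, 0, 5, 64, 2, 2, 200],
])
columns = ["label"] + sum([[f"x{i}", f"y{i}", f"v{i}"] for i in range(2)], [])

df_train = pd.DataFrame(data_train, columns=columns)
print(df_train)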
#!/usr/bin/env python3
# description : creates latency graphs from results data (avg latency by node, nof dataflows, throughput)
# auto configuration #
import os
import sys
import pandas
import seaborn
import matplotlib.pyplot as plt
path_to_results = "default" # no / after the name
if (len(sys.argv) >= 2): path_to_results = sys.argv[1]
# Functions
def load_results(directory_name):
"""
Input : path to the results files (generated with merge-files.sh)
Output : pandas table with all the results
Side-effect : none
DataFrame format :
query & baseline, nof dataflows, window id, window latency, avg latency
"""
nof_nodes=1
df = pandas.DataFrame()
while os.path.isfile(directory_name + '/results-' + str(nof_nodes) + '.csv'):
filename = directory_name + '/results-' + str(nof_nodes) + '.csv'
tmp = pandas.read_csv(filename, sep=',', header=None)
tmp["nof_nodes"]=nof_nodes
df = df.append(tmp)
nof_nodes*=2
df.columns=["query_baseline", "nof_dataflows", "window_id", "latency", "avg_latency", "throughput", "nof_nodes"]
return df
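# load_results above appends one CSV per node count with DataFrame.append, which was removed in
# pandas >= 2.0. An equivalent variant (a sketch, same column layout assumed) collects the
# per-node frames in a list and concatenates them once:
def load_results_concat(directory_name):
    frames, nof_nodes = [], 1
    while os.path.isfile(directory_name + '/results-' + str(nof_nodes) + '.csv'):
        tmp = pandas.read_csv(directory_name + '/results-' + str(nof_nodes) + '.csv', sep=',', header=None)
        tmp["nof_nodes"] = nof_nodes
        frames.append(tmp)
        nof_nodes *= 2
    df = pandas.concat(frames, ignore_index=True)  # assumes results-1.csv exists, like the original
    df.columns = ["query_baseline", "nof_dataflows", "window_id", "latency", "avg_latency", "throughput", "nof_nodes"]
    return df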
def read_sliding_window(src_df):
df = pandas.DataFrame()
# get rows where query is NQ5
for index, row in src_df.iterrows():
if (row.query_baseline[:3] == "NQ5"):
df = df.append(row)
return df
def read_tumbling_window(src_df):
df = pandas.DataFrame()
# get rows where query is NQ5
for index, row in src_df.iterrows():
if (row.query_baseline[:3] == "NQ8"):
df = df.append(row)
return df
def read_node(nof_nodes, src_df):
df = pandas.DataFrame(columns=["avg_latency", "nof_dataflows", "nof_nodes", "query_baseline", "throughput"])
node = int(nof_nodes)
baselines_list = src_df.query_baseline.unique()
dataflows_list = src_df.nof_dataflows.unique()
throughput_list = src_df.throughput.unique()
# replace latency values by average latency
for baseline in baselines_list:
for dataflow in dataflows_list:
for throughput in throughput_list:
# also removes extreme values / outliers (discriminant value is 5, but we don't expect latencies higher than 2 while messages are in order)
tmp = src_df.loc[(src_df.query_baseline == baseline) & (src_df.nof_dataflows == dataflow) & (src_df.nof_nodes == nof_nodes) & (src_df.throughput == throughput) & (src_df.latency < 5)]
mean = tmp.latency.mean()
df = df.append({"avg_latency":mean, "nof_dataflows":int(dataflow), "nof_nodes":int(node), "query_baseline":baseline, "throughput":int(throughput)}, ignore_index=True)
return df
def read_dataflow(nof_dataflow, src_df):
dataflow = int(nof_dataflow)
return src_df.loc[src_df.nof_dataflows == dataflow]
def generate_node_image(src_df):
return seaborn.lineplot(data=src_df, y='avg_latency', x='throughput', hue='query_baseline', markers=True)
# return seaborn.lineplot(data=src_df, y='avg_latency', x='throughput', hue='query_baseline', size='nof_dataflows', markers=True)
def save_image(plot, filename):
fig = plot.get_figure()
fig.get_axes()[0].set(xscale="log", yscale="log")
fig.savefig("graphs/" + filename)
plt.clf() # don't keep it in memory
def write_missing(baseline, nodes, dataflow, throughput):
f = open("missing.txt", 'a')
baseline_format = {
"NQ5 (count)":"NQ5",
"NQ5 (flow-wrapping)":"NQ5FW",
"NQ5 (sort)":"NQ5WS",
"NQ8 (count)":"NQ8",
"NQ8 (flow-wrapping)":"NQ8FW",
"NQ8 (sort)":"NQ8WS",
}
line = baseline_format[baseline] + " " + str(int(nodes)) + " " + str(int(dataflow)) + " " + str(int(throughput)) + '\n'
f.write(line)
f.close()
def generate_all_nodes_images(src_df, suffix="-default"):
node_list = src_df.nof_nodes.unique()
for nof_nodes in node_list:
df = read_node(nof_nodes, src_df)
dataflow_list = df.nof_dataflows.unique()
for dataflow in dataflow_list:
df2 = read_dataflow(dataflow, df)
if not df2.isnull().values.any():
plot = generate_node_image(df2)
fig = plot.get_figure()
fig.get_axes()[0].set(xlabel="Throughput (events / sec)", ylabel="Average latency ( " + str(int(dataflow)) + " dataflows , sec)")
save_image(plot, str(int(nof_nodes)) + '-' + str(int(dataflow)) + suffix + ".png")
else:
# plot doesn't contain all values for node/dataflow combination
for index, row in df2.iterrows():
if
|
pandas.isna(row["avg_latency"])
|
pandas.isna
|
# -*- coding: utf-8 -*-
"""sampling & neural network.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Plv4EZIskzlfzNNq6EOyEo45Gv5-NF0k
# **Imports**
"""
from google.colab import drive
drive.mount('/content/drive')
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import time
import random
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
"""# **Preprocessing**"""
def preprocessing_features(df_train):
"""
parse features column from long string to columns of features for every packet
"""
first_drop = pd.DataFrame(df_train['feature'].str.split('['))
x = pd.DataFrame(first_drop.feature.values.tolist()).add_prefix('feature_')
x.drop('feature_0', axis=1, inplace=True)
second_drop = pd.DataFrame(x['feature_1'].str.split(']'))
x = pd.DataFrame(second_drop.feature_1.values.tolist()).add_prefix('feature_')
x.drop('feature_1', axis=1, inplace=True)
x = x.rename({'feature_0':'feature'}, axis=1)
features = pd.DataFrame(x['feature'].str.split(','))
x = pd.DataFrame(features.feature.values.tolist()).add_prefix('feature_')
x = x.fillna(0)
x = x.apply(np.float64)
return x
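# Worked example of preprocessing_features (a sketch of the expected behaviour, not real data):
# a 'feature' cell such as "[1, 2, 3]" is split on '[', ']' and ',' and cast to float, so
#   preprocessing_features(pd.DataFrame({'feature': ["[1, 2, 3]", "[4, 5]"]}))
# yields columns feature_0..feature_2 with rows [1.0, 2.0, 3.0] and [4.0, 5.0, 0.0]
# (missing positions are filled with 0 by fillna).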
# get the data from csv
csvfile = pd.read_csv('/content/drive/MyDrive/combine.csv')
df = csvfile.sample(25000)
print(df['app_label'].unique())
#### todo - count the number of uniques with FM
from torch.utils.data import Dataset
class PcapDataset(Dataset):
def __init__(self, df_train):
x = preprocessing_features(df_train)
y = df_train['app_label']
y = y.apply(np.float64)
y.replace('0', 0, inplace=True)
y.replace('1', 1, inplace=True)
y.replace('2', 2, inplace=True)
y.replace('3', 3, inplace=True)
y.replace('4', 4, inplace=True)
y.replace('5', 5, inplace=True)
y.replace('6', 6, inplace=True)
y.replace('7', 7, inplace=True)
y.replace('8', 8, inplace=True)
y.replace('9', 9, inplace=True)
y.replace('10', 10, inplace=True)
y.replace('11', 11, inplace=True)
y.replace('12', 12, inplace=True)
y.replace('13', 13, inplace=True)
y.replace('15', 15, inplace=True)
y.replace('16', 14, inplace=True)
self.x_train = torch.tensor(x.values, dtype=torch.float64)
self.y_train = torch.tensor(y.values, dtype=torch.float64)
def __len__(self):
return len(self.y_train)
def __getitem__(self, idx):
return self.x_train[idx], self.y_train[idx]
# train-test split
df['split'] = np.random.randn(df.shape[0], 1)
msk = np.random.rand(len(df)) <= 0.8
train = df[msk]
test = df[~msk]
train.drop(columns=['split'], inplace=True)
test.drop(columns=['split'], inplace=True)
train.dropna(axis=0, how='any', inplace=True)
test.dropna(axis=0, how='any', inplace=True)
# load data
pcap_train = PcapDataset(train)
print(pcap_train.y_train.unique())
pcap_test = PcapDataset(test)
print(pcap_test.y_train.unique())
pcap_train.y_train[pcap_train.y_train==16] = 4
pcap_test.y_train[pcap_test.y_train==16] = 4
pcap_train.y_train[pcap_train.y_train==12] = 7
pcap_test.y_train[pcap_test.y_train==12] = 7
pcap_train.y_train[pcap_train.y_train==10] = 8
pcap_test.y_train[pcap_test.y_train==10] = 8
print(pcap_train.y_train.unique())
print(pcap_test.y_train.unique())
train_loader = torch.utils.data.DataLoader(pcap_train, batch_size=25, shuffle=True, drop_last=True)
test_loader = torch.utils.data.DataLoader(pcap_test, batch_size=25, shuffle=False, drop_last=True)
"""# **CNN - train and evaluate model on original data** (packets before sampling)"""
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv1d(1, 200, 5, 1, 0)
self.conv2 = nn.Conv1d(200, 100, 5, 1, 0)
self.dropout = nn.Dropout(p=0.05)
self.fc1 = nn.Linear(37200, 10000)
self.fc2 = nn.Linear(10000, 2000)
self.fc3 = nn.Linear(2000, 500)
self.fc4 = nn.Linear(500, 100)
self.out = nn.Linear(100,10)
def forward(self, x):
x = F.avg_pool1d(F.relu(self.conv1(x)),2)
x = self.dropout(x)
x = F.avg_pool1d(F.relu(self.conv2(x)),2)
x = self.dropout(x)
x = torch.flatten(x, start_dim=1)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = F.relu(self.fc2(x))
x = self.dropout(x)
x = F.relu(self.fc3(x))
x = self.dropout(x)
x = F.relu(self.fc4(x))
x = self.dropout(x)
x = self.out(x)
return x
model = CNN().cuda()
print(model)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
# Train the model
train_loss = []
interval_tuples = []
start = time.time()
for epoch in range(5):
running_train_loss = 0.0
for i, data in enumerate(train_loader, 0):
inputs, labels = data
labels = labels.long()
inputs = np.expand_dims(inputs, axis=1)
inputs = torch.Tensor(inputs)
inputs = inputs.cuda() # -- for GPU
labels = labels.cuda() # -- for GPU
# zero the parameters gradients
optimizer.zero_grad()
# forward + backward + optimization
outputs = model(inputs)
#print(outputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_train_loss += loss.item()
if (i+1) % 100 == 0:
interval_tuples.append(str((epoch + 1, i + 1)))
print("[{}, {}] loss: {}".format(epoch + 1, i + 1, running_train_loss / 100))
train_loss.append(running_train_loss / 100)
running_train_loss = 0.0
stop = time.time()
original_time = stop - start
print("Training time: {}".format(original_time))
total = 0
correct = 0
with torch.no_grad():
for data in test_loader:
inputs, labels = data
labels = labels.float()
inputs = np.expand_dims(inputs, axis=1)
inputs = torch.Tensor(inputs)
inputs = inputs.cuda() # -- for GPU
labels = labels.cuda() # -- for GPU
outputs = model(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
original_accuracy = (100 * correct / total)
print('Accuracy: {}%'.format(original_accuracy))
"""# **Packets' sampling experiments**"""
def trimmed_packet(df, trimmed_length):
# Systematic sampling - trimming the packet (first trimmed_length_bytes)
return df[df.columns[0:trimmed_length]]
def every_other_x_packet(df, x):
# Systematic sampling - choosing every other xth element of the packet
return df[df.columns[::x]]
def random_sampled_packet(df, sampled_length):
    # Random sampling (reservoir): keep the first sampled_length bytes of each packet, then
    # replace a uniformly chosen slot for each later byte j with probability sampled_length/(j+1)
    r = np.zeros((df.shape[0], sampled_length))
    for i in range(df.shape[0]):
        for j in range(sampled_length):
            r[i, j] = df.iloc[i, j]
        for j in range(sampled_length, df.shape[1]):
            l = random.randrange(j + 1)
            if l < sampled_length:
                r[i, l] = df.iloc[i, j]
    return pd.DataFrame(r)
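# Quick sanity check of the samplers above (illustrative only): for a (2, 6) byte frame,
# trimmed_packet(df, 3) keeps columns 0-2, every_other_x_packet(df, 2) keeps columns 0, 2, 4,
# and random_sampled_packet(df, 3) returns a (2, 3) frame holding a uniform reservoir
# sample of each row.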
df_train = pd.DataFrame(pcap_train.x_train.numpy())
df_test = pd.DataFrame(pcap_test.x_train.numpy())
# 50%
random_train = random_sampled_packet(df_train, 750)
random_test = random_sampled_packet(df_test, 750)
trimmed_train = trimmed_packet(df_train, 750)
trimmed_test = trimmed_packet(df_test, 750)
everyother_train = every_other_x_packet(df_train, 2)
everyother_test = every_other_x_packet(df_test, 2)
# 20%
random_train = random_sampled_packet(df_train, 300)
random_test = random_sampled_packet(df_test, 300)
trimmed_train = trimmed_packet(df_train, 300)
trimmed_test = trimmed_packet(df_test, 300)
everyother_train = every_other_x_packet(df_train, 5)
everyother_test = every_other_x_packet(df_test, 5)
# 10%
random_train = random_sampled_packet(df_train, 150)
random_test = random_sampled_packet(df_test, 150)
trimmed_train = trimmed_packet(df_train, 150)
trimmed_test = trimmed_packet(df_test, 150)
everyother_train = every_other_x_packet(df_train, 10)
everyother_test = every_other_x_packet(df_test, 10)
# 5%
random_train = random_sampled_packet(df_train, 75)
random_test = random_sampled_packet(df_test, 75)
trimmed_train = trimmed_packet(df_train, 75)
trimmed_test = trimmed_packet(df_test, 75)
everyother_train = every_other_x_packet(df_train, 20)
everyother_test = every_other_x_packet(df_test, 20)
# 3%
random_train = random_sampled_packet(df_train, 45)
random_test = random_sampled_packet(df_test, 45)
trimmed_train = trimmed_packet(df_train, 45)
trimmed_test = trimmed_packet(df_test, 45)
everyother_train = every_other_x_packet(df_train, 33)
everyother_test = every_other_x_packet(df_test, 33)
# 1%
random_train = random_sampled_packet(df_train, 15)
random_test = random_sampled_packet(df_test, 15)
trimmed_train = trimmed_packet(df_train, 15)
trimmed_test = trimmed_packet(df_test, 15)
everyother_train = every_other_x_packet(df_train, 100)
everyother_test = every_other_x_packet(df_test, 100)
random_train_tensor = torch.tensor(random_train.values)
random_test_tensor = torch.tensor(random_test.values)
trimmed_train_tensor = torch.tensor(trimmed_train.values)
trimmed_test_tensor = torch.tensor(trimmed_test.values)
everyother_train_tensor = torch.tensor(everyother_train.values)
everyother_test_tensor = torch.tensor(everyother_test.values)
#pcap_train.x_train = random_train_tensor
#pcap_test.x_train = random_test_tensor
#pcap_train.x_train = trimmed_train_tensor
#pcap_test.x_train = trimmed_test_tensor
pcap_train.x_train = everyother_train_tensor
pcap_test.x_train = everyother_test_tensor
train_loader = torch.utils.data.DataLoader(pcap_train, batch_size=25, shuffle=True, drop_last=True)
test_loader = torch.utils.data.DataLoader(pcap_test, batch_size=25, shuffle=False, drop_last=True)
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv1d(1, 200, 5, 1, 0)
self.conv2 = nn.Conv1d(200, 100, 5, 1, 0)
self.dropout = nn.Dropout(p=0.05)
self.fc1 = nn.Linear(100, 10000)
self.fc2 = nn.Linear(10000, 2000)
self.fc3 = nn.Linear(2000, 500)
self.fc4 = nn.Linear(500, 100)
self.out = nn.Linear(100,10)
def forward(self, x):
x = F.avg_pool1d(F.relu(self.conv1(x)),2)
x = self.dropout(x)
#x = F.avg_pool1d(F.relu(self.conv2(x)),2)
x = F.relu(self.conv2(x))
x = self.dropout(x)
x = torch.flatten(x, start_dim=1)
#print(x.shape)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = F.relu(self.fc2(x))
x = self.dropout(x)
x = F.relu(self.fc3(x))
x = self.dropout(x)
x = F.relu(self.fc4(x))
x = self.dropout(x)
x = self.out(x)
return x
model = CNN().cuda()
print(model)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
# Train the model
train_loss = []
interval_tuples = []
start = time.time()
for epoch in range(5):
running_train_loss = 0.0
for i, data in enumerate(train_loader, 0):
inputs, labels = data
labels = labels.long()
inputs = np.expand_dims(inputs, axis=1)
inputs = torch.Tensor(inputs)
inputs = inputs.cuda() # -- for GPU
labels = labels.cuda() # -- for GPU
# zero the parameters gradients
optimizer.zero_grad()
# forward + backward + optimization
outputs = model(inputs)
#print(outputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_train_loss += loss.item()
if (i+1) % 100 == 0:
interval_tuples.append(str((epoch + 1, i + 1)))
print("[{}, {}] loss: {}".format(epoch + 1, i + 1, running_train_loss / 100))
train_loss.append(running_train_loss / 100)
running_train_loss = 0.0
stop = time.time()
print("Training time: {}".format(stop-start))
total = 0
correct = 0
with torch.no_grad():
for data in test_loader:
inputs, labels = data
labels = labels.float()
inputs = np.expand_dims(inputs, axis=1)
inputs = torch.Tensor(inputs)
inputs = inputs.cuda() # -- for GPU
labels = labels.cuda() # -- for GPU
outputs = model(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy: {}%'.format((100 * correct / total)))
"""**Results' plot**"""
plt.figure(figsize=[15,10])
plt.plot([0.01, 0.03, 0.05, 0.1, 0.2, 0.5], [80.18, 97.24, 98.25, 97.5, 95.68, 94.21],
[0.01, 0.03, 0.05, 0.1, 0.2, 0.5], [84.74, 95.91, 95.72, 95.36, 96.87, 95.49],
[0.01, 0.03, 0.05, 0.1, 0.2, 0.5], [37.39, 42.16, 65.98, 82.02, 88.37, 92.64])
plt.xticks([0.01, 0.03, 0.05, 0.1, 0.2, 0.5])
plt.hlines(93.47, 0.01, 0.5, linestyle='dashed', color='r')
plt.legend(['random', 'trimming', 'every other', 'original accuracy'], prop={'size': 20})
plt.ylim((35,100))
plt.xlabel('Sample ratio', fontsize=20)
plt.ylabel('Accuracy', fontsize=20)
plt.figure(figsize=[15,10])
plt.plot([0.01, 0.03, 0.05, 0.1, 0.2, 0.5], [37, 47, 58, 85, 140, 304])
plt.xticks([0.01, 0.03, 0.05, 0.1, 0.2, 0.5])
plt.ylim((30,620))
plt.hlines(596, 0.01, 0.5, linestyle='dashed', color='r')
plt.legend(['average training time', 'original training time'], prop={'size': 20}, loc='lower right')
plt.xlabel('Sample ratio', fontsize=20)
plt.ylabel('Training time', fontsize=20)
"""## **Dimensionality Reduction Experiments**"""
class CNN_JL(nn.Module):
def __init__(self, dim):
super(CNN_JL, self).__init__()
self.conv1 = nn.Conv1d(1, 200, 5, 1, 0)
self.conv2 = nn.Conv1d(200, 100, 5, 1, 0)
self.dropout = nn.Dropout(p=0.05)
self.fc1 = nn.Linear(dim, 10000)
self.fc2 = nn.Linear(10000, 2000)
self.fc3 = nn.Linear(2000, 500)
self.fc4 = nn.Linear(500, 100)
self.out = nn.Linear(100,10)
def forward(self, x):
x = F.avg_pool1d(F.relu(self.conv1(x)),2)
x = self.dropout(x)
x = F.avg_pool1d(F.relu(self.conv2(x)),2)
# x = F.relu(self.conv2(x))
x = self.dropout(x)
x = torch.flatten(x, start_dim=1)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = F.relu(self.fc2(x))
x = self.dropout(x)
x = F.relu(self.fc3(x))
x = self.dropout(x)
x = F.relu(self.fc4(x))
x = self.dropout(x)
x = self.out(x)
return x
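# The fc1 size passed to CNN_JL below follows from the layers above: each kernel-5 Conv1d with
# no padding removes 4 positions, each avg_pool1d(.., 2) halves the length (floor division), and
# conv2 outputs 100 channels, giving floor((floor((dim - 4) / 2) - 4) / 2) * 100 flattened features.
# A small helper (a sketch, not used by the notebook) making that arithmetic explicit:
def cnn_jl_flat_features(dim):
    after_conv1 = dim - 4            # Conv1d(1, 200, kernel_size=5, padding=0)
    after_pool1 = after_conv1 // 2   # F.avg_pool1d(.., 2)
    after_conv2 = after_pool1 - 4    # Conv1d(200, 100, kernel_size=5, padding=0)
    after_pool2 = after_conv2 // 2   # second pooling
    return after_pool2 * 100         # 100 channels from conv2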
df_train = pd.DataFrame(pcap_train.x_train.numpy())
df_test = pd.DataFrame(pcap_test.x_train.numpy())
from sklearn.random_projection import johnson_lindenstrauss_min_dim
# get list of components for each epsilon in range 0,1 to 0.9
components = []
for eps in list(np.linspace(0.1, 0.9, 9)):
components.append(johnson_lindenstrauss_min_dim(1500,eps))
components = pd.Series(components)
components = components[components < 1500].sort_values(ascending=True)
components = list(components)
print(components)
# Using Johnson-Lindenstrauss dimensionality reduction technique
acc_jl_list = []
from sklearn.random_projection import SparseRandomProjection
# train model and get predictions
for dim in components:
# dimensionality reduction
# create the transformation
sp = SparseRandomProjection(n_components = dim)
x_train_np = sp.fit_transform(df_train)
x_train_pd = pd.DataFrame(x_train_np)
# evaluate the model and update the list of accuracies
x_test_np = sp.transform(df_test)
x_test_pd = pd.DataFrame(x_test_np)
print("{} dimensions after JL dimensionality reduction".format(dim))
# build model
model = CNN_JL(int(np.floor((np.floor((dim-4)/2)-4)/2)*100)).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
train_loss = []
interval_tuples = []
start = time.time()
pcap_train.x_train = torch.tensor(x_train_pd.values)
pcap_test.x_train = torch.tensor(x_test_pd.values)
train_loader = torch.utils.data.DataLoader(pcap_train, batch_size=25, shuffle=True, drop_last=True)
test_loader = torch.utils.data.DataLoader(pcap_test, batch_size=25, shuffle=False, drop_last=True)
for epoch in range(5): # todo - change back to 5 when running
running_train_loss = 0.0
for i, data in enumerate(train_loader, 0):
inputs, labels = data
labels = labels.long()
inputs = np.expand_dims(inputs, axis=1)
inputs = torch.Tensor(inputs)
inputs = inputs.cuda() # -- for GPU
labels = labels.cuda() # -- for GPU
# zero the parameters gradients
optimizer.zero_grad()
# forward + backward + optimization
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_train_loss += loss.item()
if (i+1) % 100 == 0:
interval_tuples.append(str((epoch + 1, i + 1)))
print("[{}, {}] loss: {}".format(epoch + 1, i + 1, running_train_loss / 100))
train_loss.append(running_train_loss / 100)
running_train_loss = 0.0
stop = time.time()
print("Training time: {}".format(stop-start))
# test accuracy
total = 0
correct = 0
with torch.no_grad():
for data in test_loader:
inputs, labels = data
labels = labels.float()
inputs = np.expand_dims(inputs, axis=1)
inputs = torch.Tensor(inputs)
inputs = inputs.cuda() # -- for GPU
labels = labels.cuda() # -- for GPU
outputs = model(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = (100 * correct / total)
acc_jl_list.append(accuracy)
print('Accuracy: {}%'.format(accuracy))
# create the figure
plt.figure(figsize=[12,8])
plt.title("Accuracy after JL dimensionality reduction")
plt.xticks(components)
plt.xlabel("Number of dimensions")
plt.ylabel("Accuracy")
plt.ylim((79.5,85))
# plot the baseline and random projection accuracies
plt.plot(components, acc_jl_list)
plt.hlines(93.47, min(components), max(components), linestyle='dashed', color='r')
# dimensionality reduction with PCA
df_train = pd.DataFrame(pcap_train.x_train.numpy())
df_test = pd.DataFrame(pcap_test.x_train.numpy())
from sklearn.decomposition import PCA
acc_pca_list = []
from sklearn.random_projection import SparseRandomProjection
# train model and get predictions
for dim in components:
# dimensionality reduction
# create the transformation
pca = PCA(n_components = dim)
x_train_np = pca.fit_transform(df_train)
x_train_pd =
|
pd.DataFrame(x_train_np)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 2:53:30 2018
@author: <NAME>
"""
import io
import os
import sys
import traceback
import pandas as pd
import numpy as np
def read_string(string, sep=',', header='infer', names=None, nrows=None, lineterminator=None, comment=None):
try:
return pd.read_csv(io.StringIO(string), sep=sep, header=header, names=names, nrows=nrows, lineterminator=lineterminator, comment=comment)
    except Exception:
        print('Error Converting String to DataFrame: {}'.format(traceback.format_exc()))
return
|
pd.DataFrame()
|
pandas.DataFrame
|
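# Illustrative behaviour of read_string above (a sketch, not tested output):
#   read_string("a,b\n1,2") -> a one-row DataFrame with columns 'a' and 'b'
#   on a parse error the traceback is printed and an empty DataFrame is returned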
import io
import random
import os
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from itertools import product
from tensorflow.python.keras.preprocessing.image import load_img, img_to_array
from sklearn.metrics import recall_score, f1_score, precision_score, accuracy_score, roc_auc_score, roc_curve
from typing import List
from collections import defaultdict
from albumentations import Compose
from algorithms.utils import optimize_threshold
from data_viz.functions import render_mpl_table, plot_image, create_countplot
from preprocessing.image_processing import full_image_pipeline, crop_image_pipeline
from utils.config import (
MODEL_FILES, ENSEMBLER_CONFIG, CLASSIFICATION_METRICS, CLASSIFICATION_DATA_AUGMENTATION_FUNCS
)
from utils.functions import get_path, search_files, get_filename
from algorithms.utils import apply_bootstrap
sns.set(style='white')
sns.despine()
class DataVisualizer:
"""
    Class for producing the visualizations of the network models
"""
metrics = [f.lower() if type(f) is str else f.__name__ for f in CLASSIFICATION_METRICS.values()] + \
['accuracy', 'loss']
labels = {
'BENIGN': 0,
'MALIGNANT': 1
}
def __init__(self, config: MODEL_FILES, img_type: str):
self.conf = config
self.img_type = img_type
@staticmethod
def get_dataframe_from_logs(dirname: io, metrics: list) -> pd.DataFrame:
"""
        Function used to create plots from the histories generated by keras during training.
        :param dirname: directory where the output file is stored
        :param metrics: list of metrics to plot. These metrics must be present in the logs
"""
        # List to accumulate the metrics
data_list = []
        # Iterate over the files stored in the directory and collect them into a dataframe.
        # These files are filtered by the test name assigned during training.
for file in search_files(dirname, 'csv'):
            # Read the dataframe
data = pd.read_csv(file, sep=';')
            # Recover the model name from the name assigned to the log.
data.loc[:, ['Model', 'Phase']] = get_filename(file).split('_')
data.loc[:, ['Weights', 'FrozenLayers']] = file.split(os.sep)[-3:-1]
data.loc[:, 'epoch'] = data.index + 1
            # Keep the columns whose names do not contain any of the user-defined metrics,
            # so that they are not pivoted.
melt_cols = [col for col in data.columns if not any(i in col for i in metrics)]
            # Melt the metrics into a column called Metrics_value; the name of each metric
            # goes into the Metrics_name column.
data_melt = data.melt(id_vars=melt_cols, var_name='Metrics_name', value_name='Metrics_value')
            # Derive the Mode column from the metric name. By default the keras callback
            # prefixes validation metrics with 'val', so that convention is used to split
            # the rows into training and validation.
data_melt.loc[:, 'Mode'] = np.where(data_melt.Metrics_name.str.contains('val', case=False), 'Val', 'Train')
            # Append the metrics of each model.
data_list.append(data_melt)
return pd.concat(data_list, ignore_index=True)
@staticmethod
def get_dataframe_from_preds(dirname: io) -> pd.DataFrame:
"""
        Function to create a single dataframe from the predictions of each neural network
        :param dirname: Directory that stores the networks' predictions
        :return: dataframe with the merged data
"""
l = []
        # Find all csv files in the directory
for file in search_files(dirname, 'csv'):
            # Read the file
df =
|
pd.read_csv(file, sep=';')
|
pandas.read_csv
|
#!/usr/bin/env python3
import sys
import pandas as pd
import numpy as np
import json
from datetime import datetime
from hashlib import md5
import os.path as path
import argparse
import os.path as path
import pysolr
from uuid import uuid1
DEBUG = True
filename = 'output/PATH_005'
filename = 'output/PATH_147'
filename = 'output/PATH_016'
filename = 'output/PATH_024'
filename = 'output/PATH_008'
filename = 'output/PATH_090'
filename = 'output/AA_132'
filename = 'output/PATH_004'
filename = 'output/AA_003'
filename = 'output/HD_001'
filename = 'output/TR_002'
filename = 'output/PATH_004'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process trains services \
file to json')
parser.add_argument('inputfile', type=str, help='name of working \
timetable file to parse')
args = parser.parse_args()
filename = args.inputfile
DEBUG = False
filestub = path.basename(filename)
if DEBUG:
print(filename)
pd.set_option('display.max_columns', None)
ISO8601_DATE = datetime(1900, 1, 1)
DAY = pd.offsets.Day()
MONDAY = pd.offsets.Week(weekday=0)
def header_date(this_column):
return pd.to_datetime(this_column, format='%d%m%y').dt.strftime('%Y-%m-%d')
def wtt_date(this_column):
return pd.to_datetime(this_column, format='%y%m%d').dt.strftime('%Y-%m-%d')
def wtt_datetime(this_column):
return this_column.dt.strftime('%Y-%m-%dT%H:%M:%SZ')
def wtt_time(this_column, format='%H%M%S'):
this_column = this_column.str.replace('H', '30').str.replace(' ', '00')
return pd.to_datetime(this_column, format=format)
def blank_columns(this_frame):
return [n for n in this_frame.select_dtypes(include=['object']).columns if this_frame[n].str.isspace().all() or (this_frame[n] == '').all()]
def strip_columns(this_frame):
return [n for n in this_frame.select_dtypes(include=['object']).columns if this_frame[n].str.isspace().any()]
def days_str(this_series):
return pd.to_datetime(this_series).apply(lambda v: '{:b}'.format(64 >> v.weekday()).zfill(7))
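# days_str encodes each date's weekday as a 7-character bit string, Monday first:
# 64 >> weekday() sets a single bit, so a Monday maps to '1000000' and a Sunday to '0000001'.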
def get_dates(this_df):
no_idx = this_df['Date To'].str.isspace()
this_df.loc[no_idx, 'Days'] = days_str(this_df.loc[no_idx, 'Date From'])
this_df.loc[no_idx, 'Date To'] = this_df.loc[no_idx, 'Date From']
this_df['Date From'] = pd.to_datetime(this_df['Date From'], format='%y%m%d')
this_df['Date To'] = pd.to_datetime(this_df['Date To'], format='%y%m%d')
this_df['Dates'] = wtt_date(this_df['Date From'] - MONDAY) + '.' + wtt_date(this_df['Date To'] + MONDAY) + '.' + this_df['Days']
this_df['Date From'] = wtt_datetime(this_df['Date From'])
this_df['Date To'] = wtt_datetime(this_df['Date To'])
return this_df[['Date From', 'Date To', 'Dates', 'Days']]
def header_record(records):
"""process CIF file header record from 80-character line string"""
this_array = [[line[0:2], line[2:22], line[22:28], line[28:32], line[32:39], line[39:46], line[46:47], line[47:48], line[48:54], line[54:60]] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'File Mainframe Identity', 'Date of Extract', 'Time of Extract', 'Current File Ref', 'Last File Ref', 'Bleed off Update Ind', 'Version', 'User Extract Start Date', 'User Extract End Date'])
this_frame['Extract Datetime'] = pd.to_datetime(this_frame['Time of Extract'] + this_frame['Date of Extract'], format='%H%M%d%m%y').dt.strftime('%Y-%m-%dT%H:%M:%SZ')
this_frame['Extract Interval'] = header_date(this_frame['User Extract Start Date']) + '/' + header_date(this_frame['User Extract End Date'])
this_frame = this_frame.drop(['User Extract Start Date', 'User Extract End Date', 'Time of Extract', 'Date of Extract'], axis=1)
this_frame = this_frame.drop(blank_columns(this_frame), axis=1)
#this_frame['id'] = [md5(x.encode()).hexdigest() for x in records]
return this_frame
def tiploc_record(records):
"""return CIF file TIPLOC object from 80-character line string"""
this_array = [[line[0:2],line[2:9],line[9:11],line[11:17],line[17:18],line[18:44],line[44:49],line[49:53],line[53:56],line[56:72],line[72:79]] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID','TIPLOC','Capitals Identification','Nalco','NLC check character','TPS Description','Stanox','PO MCP','CRS','Description','New TIPLOC'])
this_frame = this_frame.drop(blank_columns(this_frame), axis=1)
#this_frame['id'] = [md5(x.encode()).hexdigest() for x in records]
return this_frame
def notes_record(records):
"""return CIF file train notes object en route object from 80-character line string"""
this_array = [[line[0:2],line[2:3],line[3:80]] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID','Note Type','Note'])
this_frame = this_frame.drop(blank_columns(this_frame), axis=1)
#this_frame['id'] = [md5(x.encode()).hexdigest() for x in records]
return this_frame
def association_record(records):
"""return CIF file train-association object from 80-character line string"""
this_array = [[line[0:2],line[2:3],line[3:9],line[9:15],line[15:21],line[21:27],line[27:34],line[34:36],line[36:37],line[37:44],line[44:45],line[45:46],line[47:48],line[79:80]] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID','Transaction','Main UID','UID','Date From','Date To','Days','Category','Indicator','Location','Base Suffix','Location Suffix','Type','STP'])
this_frame[['Date From', 'Date To', 'Dates', 'Days']] = get_dates(this_frame)
#this_frame = this_frame.drop(['Date From', 'Date To'], axis=1)
this_frame = this_frame.drop(blank_columns(this_frame), axis=1)
#this_frame['id'] = [md5(x.encode()).hexdigest() for x in records]
return this_frame
def wtt_records(records):
this_array = [[line[0:2],line] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'Data'])
this_frame['key'] = [md5(x.encode()).hexdigest() for x in records]
this_frame.loc[this_frame['ID'] == 'BS', 'UUID'] = this_frame.loc[this_frame['ID'] == 'BS', 'key']
this_frame = this_frame.fillna(method='ffill')
return this_frame
def pa_record(this_df):
this_array = [['PA', line[2:3], line[3:9], line[9:15], line[15:21], line[21:28], line[79:80]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'Transaction','UID','Date From','Date To','Days','STP'])
this_frame[['Date From', 'Date To', 'Dates', 'Days']] = get_dates(this_frame)
#this_frame = this_frame.drop(['Date From', 'Date To'], axis=1)
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
return this_frame
def bs_record(this_df):
this_array = [[line[0:2], line[28:29], line[29:30], line[30:32], line[32:36], line[36:40], line[41:49], line[49:50], line[50:53], line[53:57], line[57:60], line[60:66], line[66:67], line[67:68], line[68:69], line[70:74], line[74:78]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'Bank Holiday Running', 'Train Status', 'Train Category', 'Headcode', 'NRS Headcode', 'Train Service', 'Portion Id', 'Power Type', 'Timing Load', 'Speed', 'Characteristics', 'Seating Class', 'Sleepers', 'Reservations', 'Catering', 'Service Branding'])
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
return this_frame
def bx_record(this_df):
this_array = [[line[0:2], line[6:11], line[11:13], line[13:14], line[14:22], line[22:23]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'UIC', 'ATOC', 'Applicable Timetable', 'RSID', 'Data Source'])
this_frame['UUID'] = this_df['UUID'].tolist()
return this_frame
def origin_location(this_df):
"""return CIF file depart from origin object, updated last reported time and
train operation duration from 80-character line string, the last reported time
and the train operation duration"""
this_array = [[line[0:2], line[2:9], line[9:10], line[10:15], line[15:19], line[19:22], line[22:25], line[25:27], line[27:29], line[29:41], line[41:43], line[43:46]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'TIPLOC', 'Suffix', 'Schedule', 'Public Schedule', 'Platform', 'Line', 'Engineering Allowance', 'Pathing Allowance', 'Activity', 'Performance Allowance', 'Reserved'])
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
this_frame['id'] += '.0'
this_frame['T'] = 'OD'
this_frame['index'] = this_df.index.tolist()
return this_frame
def intermediate_location(this_df):
"""return CIF file intermediate location object, updated last reported time
and train operation duration from 80-character line string, the last reported
time and the train operation duration"""
this_array = [[line[0:2], line[2:9], line[9:10], line[10:15], line[15:20], line[20:25], line[25:29], line[29:33], line[33:36], line[36:39], line[39:42], line[42:54], line[54:56], line[56:58], line[58:60], line[60:65]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'TIPLOC', 'Suffix', 'Schedule Arrival', 'Schedule Departure', 'Schedule Pass', 'Public Arrival', 'Public Departure', 'Platform', 'Line', 'Path', 'Activity', 'Engineering Allowance', 'Pathing Allowance', 'Performance Allowance', 'Reserved'])
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
this_frame['index'] = this_df.index.tolist()
idx_pass = (~this_frame['Schedule Pass'].str.isspace())
df_arrival = this_frame[~idx_pass].rename(columns={'Schedule Arrival': 'Schedule', 'Public Arrival': 'Public Schedule'})
df_arrival = df_arrival.drop(['Schedule Departure', 'Public Departure', 'Schedule Pass'], axis=1)
df_arrival['T'] = 'IA'
df_arrival['id'] += '.1'
df_departure = this_frame[~idx_pass].rename(columns={'Schedule Departure': 'Schedule', 'Public Departure': 'Public Schedule'})
df_departure = df_departure.drop(['Schedule Arrival', 'Public Arrival', 'Schedule Pass'], axis=1)
df_departure['T'] = 'ID'
df_departure['id'] += '.3'
df_pass = this_frame[idx_pass].rename(columns={'Schedule Pass': 'Schedule'})
df_pass = df_pass.drop(['Schedule Arrival', 'Public Arrival', 'Schedule Departure', 'Public Departure'], axis=1)
df_pass['Public Schedule'] = '0000'
df_pass['T'] = 'IP'
df_pass['id'] += '.2'
return pd.concat([df_arrival, df_departure, df_pass], sort=False)
def terminus_location(this_df):
this_array = [[line[0:2], line[2:9], line[9:10], line[10:15], line[15:19], line[19:22], line[22:25], line[25:27], line[37:40]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'TIPLOC', 'Suffix', 'Schedule', 'Public Schedule', 'Platform', 'Path', 'Activity', 'Reserved'])
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
this_frame['T'] = 'TA'
this_frame['id'] += '.4'
for key in ['Line', 'Engineering Allowance', 'Pathing Allowance', 'Performance Allowance']:
this_frame[key] = ''
this_frame['index'] = this_df.index.tolist()
return this_frame
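# Each calling point above is exploded into typed rows: origin departures (T='OD', id suffix '.0'),
# intermediate arrivals ('IA', '.1'), passes ('IP', '.2') and departures ('ID', '.3'), and
# terminus arrivals ('TA', '.4'), so one schedule yields one row per timed event.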
def change_en_route(this_df):
"""return CIF file train change en route object from 80-character line string"""
this_array = [[line[0:2], line[2:9], line[9:10], line[10:12], line[12:16], line[16:20], line[21:29], line[29:30], line[30:33], line[33:37], line[37:40], line[40:46], line[46:47], line[47:48], line[48:49], line[50:54], line[54:58], line[62:67], line[67:75]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'TIPLOC', 'Suffix', 'Train Category', 'Headcode', 'NRS Headcode', 'Train Service', 'Portion Id', 'Power Type', 'Timing Load', 'Speed', 'Operating Characteristics', 'Seating Class', 'Sleepers', 'Reservations', 'Catering', 'Service Branding', 'UIC', 'Reserved'])
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
return this_frame
def get_wtt(this_df):
LO_frame = origin_location(this_df[this_df['ID'] == 'LO'])
LI_frame = intermediate_location(this_df[this_df['ID'] == 'LI'])
LT_frame = terminus_location(this_df[this_df['ID'] == 'LT'])
WTT =
|
pd.concat([LO_frame, LI_frame, LT_frame], sort=False)
|
pandas.concat
|
"""\
Main class and helper functions.
"""
import warnings
import collections.abc as cabc
from collections import OrderedDict
from copy import copy, deepcopy
from enum import Enum
from functools import partial, singledispatch
from pathlib import Path
from os import PathLike
from textwrap import dedent
from typing import Any, Union, Optional # Meta
from typing import Iterable, Sequence, Mapping, MutableMapping # Generic ABCs
from typing import Tuple, List # Generic
import h5py
from natsort import natsorted
import numpy as np
from numpy import ma
import pandas as pd
from pandas.api.types import infer_dtype, is_string_dtype, is_categorical_dtype
from scipy import sparse
from scipy.sparse import issparse, csr_matrix
from .raw import Raw
from .index import _normalize_indices, _subset, Index, Index1D, get_vector
from .file_backing import AnnDataFileManager, to_memory
from .access import ElementRef
from .aligned_mapping import (
AxisArrays,
AxisArraysView,
PairwiseArrays,
PairwiseArraysView,
Layers,
LayersView,
)
from .views import (
ArrayView,
DictView,
DataFrameView,
as_view,
_resolve_idxs,
)
from .sparse_dataset import SparseDataset
from .. import utils
from ..utils import convert_to_dict, ensure_df_homogeneous
from ..logging import anndata_logger as logger
from ..compat import (
ZarrArray,
ZappyArray,
DaskArray,
Literal,
_slice_uns_sparse_matrices,
_move_adj_mtx,
_overloaded_uns,
OverloadedDict,
)
class StorageType(Enum):
Array = np.ndarray
Masked = ma.MaskedArray
Sparse = sparse.spmatrix
ZarrArray = ZarrArray
ZappyArray = ZappyArray
DaskArray = DaskArray
@classmethod
def classes(cls):
return tuple(c.value for c in cls.__members__.values())
# for backwards compat
def _find_corresponding_multicol_key(key, keys_multicol):
"""Find the corresponding multicolumn key."""
for mk in keys_multicol:
if key.startswith(mk) and "of" in key:
return mk
return None
# for backwards compat
def _gen_keys_from_multicol_key(key_multicol, n_keys):
"""Generates single-column keys from multicolumn key."""
keys = [f"{key_multicol}{i + 1:03}of{n_keys:03}" for i in range(n_keys)]
return keys
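# e.g. _gen_keys_from_multicol_key("X_pca", 2) -> ["X_pca001of002", "X_pca002of002"]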
def _check_2d_shape(X):
"""\
Check shape of array or sparse matrix.
Assure that X is always 2D: Unlike numpy we always deal with 2D arrays.
"""
if X.dtype.names is None and len(X.shape) != 2:
raise ValueError(
f"X needs to be 2-dimensional, not {len(X.shape)}-dimensional."
)
@singledispatch
def _gen_dataframe(anno, length, index_names):
if anno is None or len(anno) == 0:
return pd.DataFrame(index=pd.RangeIndex(0, length, name=None).astype(str))
for index_name in index_names:
if index_name in anno:
return pd.DataFrame(
anno,
index=anno[index_name],
columns=[k for k in anno.keys() if k != index_name],
)
return pd.DataFrame(anno, index=pd.RangeIndex(0, length, name=None).astype(str))
@_gen_dataframe.register(pd.DataFrame)
def _(anno, length, index_names):
anno = anno.copy()
if not is_string_dtype(anno.index):
warnings.warn("Transforming to str index.", ImplicitModificationWarning)
anno.index = anno.index.astype(str)
return anno
@_gen_dataframe.register(pd.Series)
@_gen_dataframe.register(pd.Index)
def _(anno, length, index_names):
raise ValueError(f"Cannot convert {type(anno)} to DataFrame")
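# Illustration of the dispatch above (values are what the code produces):
#   _gen_dataframe(None, 2, ["obs_names", "row_names"])  -> empty frame with str index "0", "1"
#   _gen_dataframe({"obs_names": ["a", "b"], "x": [1, 2]}, 2, ["obs_names", "row_names"])
#       -> frame indexed by "a", "b" with the single column "x"
#   passing a pd.DataFrame returns a copy with its index cast to str (warning if a cast is needed)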
class ImplicitModificationWarning(UserWarning):
"""\
Raised whenever initializing an object or assigning a property changes
the type of a part of a parameter or the value being assigned.
Examples
========
>>> import pandas as pd
>>> adata = AnnData(obs=pd.DataFrame(index=[0, 1, 2])) # doctest: +SKIP
ImplicitModificationWarning: Transforming to str index.
"""
pass
class AnnData(metaclass=utils.DeprecationMixinMeta):
"""\
An annotated data matrix.
:class:`~anndata.AnnData` stores a data matrix :attr:`X` together with annotations
of observations :attr:`obs` (:attr:`obsm`, :attr:`obsp`),
variables :attr:`var` (:attr:`varm`, :attr:`varp`),
and unstructured annotations :attr:`uns`.
.. figure:: https://falexwolf.de/img/scanpy/anndata.svg
:width: 350px
An :class:`~anndata.AnnData` object `adata` can be sliced like a
:class:`~pandas.DataFrame`,
for instance `adata_subset = adata[:, list_of_variable_names]`.
:class:`~anndata.AnnData`’s basic structure is similar to R’s ExpressionSet
[Huber15]_. If setting an `.h5ad`-formatted HDF5 backing file `.filename`,
data remains on the disk but is automatically loaded into memory if needed.
See this `blog post`_ for more details.
.. _blog post: http://falexwolf.de/blog/171223_AnnData_indexing_views_HDF5-backing/
Parameters
----------
X
A #observations × #variables data matrix. A view of the data is used if the
data type matches, otherwise, a copy is made.
obs
Key-indexed one-dimensional observations annotation of length #observations.
var
Key-indexed one-dimensional variables annotation of length #variables.
uns
Key-indexed unstructured annotation.
obsm
Key-indexed multi-dimensional observations annotation of length #observations.
If passing a :class:`~numpy.ndarray`, it needs to have a structured datatype.
varm
Key-indexed multi-dimensional variables annotation of length #variables.
If passing a :class:`~numpy.ndarray`, it needs to have a structured datatype.
layers
Key-indexed multi-dimensional arrays aligned to dimensions of `X`.
dtype
Data type used for storage.
shape
Shape tuple (#observations, #variables). Can only be provided if `X` is `None`.
filename
Name of backing file. See :class:`h5py.File`.
filemode
Open mode of backing file. See :class:`h5py.File`.
See Also
--------
read_h5ad
read_csv
read_excel
read_hdf
read_loom
read_zarr
read_mtx
read_text
read_umi_tools
Notes
-----
:class:`~anndata.AnnData` stores observations (samples) of variables/features
in the rows of a matrix.
This is the convention of the modern classics of statistics [Hastie09]_
and machine learning [Murphy12]_,
the convention of dataframes both in R and Python and the established statistics
and machine learning packages in Python (statsmodels_, scikit-learn_).
Single dimensional annotations of the observation and variables are stored
in the :attr:`obs` and :attr:`var` attributes as :class:`~pandas.DataFrame`\\ s.
This is intended for metrics calculated over their axes.
Multi-dimensional annotations are stored in :attr:`obsm` and :attr:`varm`,
which are aligned to the objects observation and variable dimensions respectively.
Square matrices representing graphs are stored in :attr:`obsp` and :attr:`varp`,
with both of their own dimensions aligned to their associated axis.
Additional measurements across both observations and variables are stored in
:attr:`layers`.
Indexing into an AnnData object can be performed by relative position
with numeric indices (like pandas’ :meth:`~pandas.DataFrame.iloc`),
or by labels (like :meth:`~pandas.DataFrame.loc`).
To avoid ambiguity with numeric indexing into observations or variables,
indexes of the AnnData object are converted to strings by the constructor.
Subsetting an AnnData object by indexing into it will also subset its elements
according to the dimensions they were aligned to.
This means an operation like `adata[list_of_obs, :]` will also subset :attr:`obs`,
:attr:`obsm`, and :attr:`layers`.
Subsetting an AnnData object returns a view into the original object,
meaning very little additional memory is used upon subsetting.
This is achieved lazily, meaning that the constituent arrays are subset on access.
Copying a view causes an equivalent “real” AnnData object to be generated.
Attempting to modify a view (at any attribute except X) is handled
in a copy-on-modify manner, meaning the object is initialized in place.
Here’s an example::
batch1 = adata[adata.obs["batch"] == "batch1", :]
batch1.obs["value"] = 0 # This makes batch1 a “real” AnnData object
At the end of this snippet: `adata` was not modified,
and `batch1` is its own AnnData object with its own data.
Similar to Bioconductor’s `ExpressionSet` and :mod:`scipy.sparse` matrices,
subsetting an AnnData object retains the dimensionality of its constituent arrays.
Therefore, unlike with the classes exposed by :mod:`pandas`, :mod:`numpy`,
and `xarray`, there is no concept of a one dimensional AnnData object.
AnnDatas always have two inherent dimensions, :attr:`obs` and :attr:`var`.
Additionally, maintaining the dimensionality of the AnnData object allows for
consistent handling of :mod:`scipy.sparse` matrices and :mod:`numpy` arrays.
.. _statsmodels: http://www.statsmodels.org/stable/index.html
.. _scikit-learn: http://scikit-learn.org/
"""
_BACKED_ATTRS = ["X", "raw.X"]
# backwards compat
_H5_ALIASES = dict(
X={"X", "_X", "data", "_data"},
obs={"obs", "_obs", "smp", "_smp"},
var={"var", "_var"},
uns={"uns"},
obsm={"obsm", "_obsm", "smpm", "_smpm"},
varm={"varm", "_varm"},
layers={"layers", "_layers"},
)
_H5_ALIASES_NAMES = dict(
obs={"obs_names", "smp_names", "row_names", "index"},
var={"var_names", "col_names", "index"},
)
def __init__(
self,
X: Optional[Union[np.ndarray, sparse.spmatrix, pd.DataFrame]] = None,
obs: Optional[Union[pd.DataFrame, Mapping[str, Iterable[Any]]]] = None,
var: Optional[Union[pd.DataFrame, Mapping[str, Iterable[Any]]]] = None,
uns: Optional[Mapping[str, Any]] = None,
obsm: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None,
varm: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None,
layers: Optional[Mapping[str, Union[np.ndarray, sparse.spmatrix]]] = None,
raw: Optional[Mapping[str, Any]] = None,
dtype: Union[np.dtype, str] = "float32",
shape: Optional[Tuple[int, int]] = None,
filename: Optional[PathLike] = None,
filemode: Optional[Literal["r", "r+"]] = None,
asview: bool = False,
*,
obsp: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None,
varp: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None,
oidx: Index1D = None,
vidx: Index1D = None,
):
if asview:
if not isinstance(X, AnnData):
raise ValueError("`X` has to be an AnnData object.")
self._init_as_view(X, oidx, vidx)
else:
self._init_as_actual(
X=X,
obs=obs,
var=var,
uns=uns,
obsm=obsm,
varm=varm,
raw=raw,
layers=layers,
dtype=dtype,
shape=shape,
obsp=obsp,
varp=varp,
filename=filename,
filemode=filemode,
)
def _init_as_view(self, adata_ref: "AnnData", oidx: Index, vidx: Index):
if adata_ref.isbacked and adata_ref.is_view:
raise ValueError(
"Currently, you cannot index repeatedly into a backed AnnData, "
"that is, you cannot make a view of a view."
)
self._is_view = True
if isinstance(oidx, (int, np.integer)):
oidx = slice(oidx, oidx + 1, 1)
if isinstance(vidx, (int, np.integer)):
vidx = slice(vidx, vidx + 1, 1)
if adata_ref.is_view:
prev_oidx, prev_vidx = adata_ref._oidx, adata_ref._vidx
adata_ref = adata_ref._adata_ref
oidx, vidx = _resolve_idxs((prev_oidx, prev_vidx), (oidx, vidx), adata_ref)
# self._adata_ref is never a view
self._adata_ref = adata_ref
self._oidx = oidx
self._vidx = vidx
# the file is the same as of the reference object
self.file = adata_ref.file
# views on attributes of adata_ref
obs_sub = adata_ref.obs.iloc[oidx]
var_sub = adata_ref.var.iloc[vidx]
self._obsm = adata_ref.obsm._view(self, (oidx,))
self._varm = adata_ref.varm._view(self, (vidx,))
self._layers = adata_ref.layers._view(self, (oidx, vidx))
self._obsp = adata_ref.obsp._view(self, oidx)
self._varp = adata_ref.varp._view(self, vidx)
        # Special case for old neighbors, backwards compat. Remove in anndata 0.8.
uns_new = _slice_uns_sparse_matrices(
copy(adata_ref._uns), self._oidx, adata_ref.n_obs
)
# fix categories
self._remove_unused_categories(adata_ref.obs, obs_sub, uns_new)
self._remove_unused_categories(adata_ref.var, var_sub, uns_new)
# set attributes
self._obs = DataFrameView(obs_sub, view_args=(self, "obs"))
self._var = DataFrameView(var_sub, view_args=(self, "var"))
self._uns = uns_new
self._n_obs = len(self.obs)
self._n_vars = len(self.var)
# set data
if self.isbacked:
self._X = None
# set raw, easy, as it’s immutable anyways...
if adata_ref._raw is not None:
# slicing along variables axis is ignored
self._raw = adata_ref.raw[oidx]
self._raw._adata = self
else:
self._raw = None
def _init_as_actual(
self,
X=None,
obs=None,
var=None,
uns=None,
obsm=None,
varm=None,
varp=None,
obsp=None,
raw=None,
layers=None,
dtype="float32",
shape=None,
filename=None,
filemode=None,
):
# view attributes
self._is_view = False
self._adata_ref = None
self._oidx = None
self._vidx = None
# ----------------------------------------------------------------------
# various ways of initializing the data
# ----------------------------------------------------------------------
# If X is a data frame, we store its indices for verification
x_indices = []
# init from file
if filename is not None:
self.file = AnnDataFileManager(self, filename, filemode)
else:
self.file = AnnDataFileManager(self, None)
# init from AnnData
if isinstance(X, AnnData):
if any((obs, var, uns, obsm, varm, obsp, varp)):
raise ValueError(
"If `X` is a dict no further arguments must be provided."
)
X, obs, var, uns, obsm, varm, obsp, varp, layers, raw = (
X._X,
X.obs,
X.var,
X.uns,
X.obsm,
X.varm,
X.obsp,
X.varp,
X.layers,
X.raw,
)
# init from DataFrame
elif isinstance(X, pd.DataFrame):
# to verify index matching, we wait until obs and var are DataFrames
if obs is None:
obs = pd.DataFrame(index=X.index)
elif not isinstance(X.index, pd.RangeIndex):
x_indices.append(("obs", "index", X.index))
if var is None:
var = pd.DataFrame(index=X.columns)
elif not isinstance(X.columns, pd.RangeIndex):
x_indices.append(("var", "columns", X.columns))
X = ensure_df_homogeneous(X, "X")
# ----------------------------------------------------------------------
# actually process the data
# ----------------------------------------------------------------------
# check data type of X
if X is not None:
for s_type in StorageType:
if isinstance(X, s_type.value):
break
else:
class_names = ", ".join(c.__name__ for c in StorageType.classes())
raise ValueError(
f"`X` needs to be of one of {class_names}, not {type(X)}."
)
if shape is not None:
raise ValueError("`shape` needs to be `None` if `X` is not `None`.")
_check_2d_shape(X)
# if type doesn’t match, a copy is made, otherwise, use a view
if issparse(X) or isinstance(X, ma.MaskedArray):
# TODO: maybe use view on data attribute of sparse matrix
# as in readwrite.read_10x_h5
if X.dtype != np.dtype(dtype):
X = X.astype(dtype)
elif isinstance(X, ZarrArray):
X = X.astype(dtype)
else: # is np.ndarray or a subclass, convert to true np.ndarray
X = np.array(X, dtype, copy=False)
# data matrix and shape
self._X = X
self._n_obs, self._n_vars = self._X.shape
else:
self._X = None
self._n_obs = len([] if obs is None else obs)
self._n_vars = len([] if var is None else var)
# check consistency with shape
if shape is not None:
if self._n_obs == 0:
self._n_obs = shape[0]
else:
if self._n_obs != shape[0]:
raise ValueError("`shape` is inconsistent with `obs`")
if self._n_vars == 0:
self._n_vars = shape[1]
else:
if self._n_vars != shape[1]:
raise ValueError("`shape` is inconsistent with `var`")
# annotations
self._obs = _gen_dataframe(obs, self._n_obs, ["obs_names", "row_names"])
self._var = _gen_dataframe(var, self._n_vars, ["var_names", "col_names"])
# now we can verify if indices match!
for attr_name, x_name, idx in x_indices:
attr = getattr(self, attr_name)
if isinstance(attr.index, pd.RangeIndex):
attr.index = idx
elif not idx.equals(attr.index):
raise ValueError(f"Index of {attr_name} must match {x_name} of X.")
# unstructured annotations
self.uns = uns or OrderedDict()
# TODO: Think about consequences of making obsm a group in hdf
self._obsm = AxisArrays(self, 0, vals=convert_to_dict(obsm))
self._varm = AxisArrays(self, 1, vals=convert_to_dict(varm))
self._obsp = PairwiseArrays(self, 0, vals=convert_to_dict(obsp))
self._varp = PairwiseArrays(self, 1, vals=convert_to_dict(varp))
# Backwards compat for connectivities matrices in uns["neighbors"]
_move_adj_mtx({"uns": self._uns, "obsp": self._obsp})
self._check_dimensions()
self._check_uniqueness()
if self.filename:
assert not isinstance(
raw, Raw
), "got raw from other adata but also filename?"
if {"raw", "raw.X"} & set(self.file):
raw = dict(X=None, **raw)
if not raw:
self._raw = None
elif isinstance(raw, cabc.Mapping):
self._raw = Raw(self, **raw)
else: # is a Raw from another AnnData
self._raw = Raw(self, raw._X, raw.var, raw.varm)
# clean up old formats
self._clean_up_old_format(uns)
# layers
self._layers = Layers(self, layers)
def __sizeof__(self, show_stratified=None) -> int:
def get_size(X):
if issparse(X):
X_csr = csr_matrix(X)
return X_csr.data.nbytes + X_csr.indptr.nbytes + X_csr.indices.nbytes
else:
return X.__sizeof__()
size = 0
attrs = list(["_X", "_obs", "_var"])
attrs_multi = list(["_uns", "_obsm", "_varm", "varp", "_obsp", "_layers"])
for attr in attrs + attrs_multi:
if attr in attrs_multi:
keys = getattr(self, attr).keys()
s = sum([get_size(getattr(self, attr)[k]) for k in keys])
else:
s = get_size(getattr(self, attr))
if s > 0 and show_stratified:
str_attr = attr.replace("_", ".") + " " * (7 - len(attr))
print(f"Size of {str_attr}: {'%3.2f' % (s / (1024 ** 2))} MB")
size += s
return size
def _gen_repr(self, n_obs, n_vars) -> str:
if self.isbacked:
backed_at = f" backed at {str(self.filename)!r}"
else:
backed_at = ""
descr = f"AnnData object with n_obs × n_vars = {n_obs} × {n_vars}{backed_at}"
for attr in [
"obs",
"var",
"uns",
"obsm",
"varm",
"layers",
"obsp",
"varp",
]:
keys = getattr(self, attr).keys()
if len(keys) > 0:
descr += f"\n {attr}: {str(list(keys))[1:-1]}"
return descr
def __repr__(self) -> str:
if self.is_view:
return "View of " + self._gen_repr(self.n_obs, self.n_vars)
else:
return self._gen_repr(self.n_obs, self.n_vars)
def __eq__(self, other):
"""Equality testing"""
raise NotImplementedError(
"Equality comparisons are not supported for AnnData objects, "
"instead compare the desired attributes."
)
@property
def shape(self) -> Tuple[int, int]:
"""Shape of data matrix (:attr:`n_obs`, :attr:`n_vars`)."""
return self.n_obs, self.n_vars
@property
def X(self) -> Optional[Union[np.ndarray, sparse.spmatrix, ArrayView]]:
"""Data matrix of shape :attr:`n_obs` × :attr:`n_vars`."""
if self.isbacked:
if not self.file.is_open:
self.file.open()
X = self.file["X"]
if isinstance(X, h5py.Group):
X = SparseDataset(X)
# This is so that we can index into a backed dense dataset with
# indices that aren’t strictly increasing
if self.is_view:
X = _subset(X, (self._oidx, self._vidx))
elif self.is_view:
X = as_view(
_subset(self._adata_ref.X, (self._oidx, self._vidx)),
ElementRef(self, "X"),
)
else:
X = self._X
return X
# if self.n_obs == 1 and self.n_vars == 1:
# return X[0, 0]
# elif self.n_obs == 1 or self.n_vars == 1:
# if issparse(X): X = X.toarray()
# return X.flatten()
# else:
# return X
@X.setter
def X(self, value: Optional[Union[np.ndarray, sparse.spmatrix]]):
if value is None:
if self.isbacked:
raise NotImplementedError(
"Cannot currently remove data matrix from backed object."
)
if self.is_view:
self._init_as_actual(self.copy())
self._X = None
return
if not isinstance(value, StorageType.classes()) and not np.isscalar(value):
if hasattr(value, "to_numpy") and hasattr(value, "dtypes"):
value = ensure_df_homogeneous(value, "X")
else: # TODO: asarray? asanyarray?
value = np.array(value)
# If indices are both arrays, we need to modify them
# so we don’t set values like coordinates
        # This can occur if there are successive views
if (
self.is_view
and isinstance(self._oidx, np.ndarray)
and isinstance(self._vidx, np.ndarray)
):
oidx, vidx = np.ix_(self._oidx, self._vidx)
else:
oidx, vidx = self._oidx, self._vidx
if (
np.isscalar(value)
or (hasattr(value, "shape") and (self.shape == value.shape))
or (self.n_vars == 1 and self.n_obs == len(value))
or (self.n_obs == 1 and self.n_vars == len(value))
):
if not np.isscalar(value) and self.shape != value.shape:
# For assigning vector of values to 2d array or matrix
                # Not necessary for row of 2d array
value = value.reshape(self.shape)
if self.isbacked:
if self.is_view:
X = self.file["X"]
if isinstance(X, h5py.Group):
X = SparseDataset(X)
X[oidx, vidx] = value
else:
self._set_backed("X", value)
else:
if self.is_view:
if sparse.issparse(self._adata_ref._X) and isinstance(
value, np.ndarray
):
value = sparse.coo_matrix(value)
self._adata_ref._X[oidx, vidx] = value
else:
self._X = value
else:
raise ValueError(
f"Data matrix has wrong shape {value.shape}, "
f"need to be {self.shape}."
)
@X.deleter
def X(self):
self.X = None
@property
def layers(self) -> Union[Layers, LayersView]:
"""\
Dictionary-like object with values of the same dimensions as :attr:`X`.
Layers in AnnData are inspired by loompy’s :ref:`loomlayers`.
Return the layer named `"unspliced"`::
adata.layers["unspliced"]
Create or replace the `"spliced"` layer::
adata.layers["spliced"] = ...
Assign the 10th column of layer `"spliced"` to the variable a::
a = adata.layers["spliced"][:, 10]
Delete the `"spliced"` layer::
del adata.layers["spliced"]
Return layers’ names::
adata.layers.keys()
"""
return self._layers
@layers.setter
def layers(self, value):
layers = Layers(self, vals=convert_to_dict(value))
if self.is_view:
self._init_as_actual(self.copy())
self._layers = layers
@layers.deleter
def layers(self):
self.layers = dict()
@property
def raw(self) -> Raw:
"""\
Store raw version of :attr:`X` and :attr:`var` as `.raw.X` and `.raw.var`.
The :attr:`raw` attribute is initialized with the current content
of an object by setting::
adata.raw = adata
Its content can be deleted::
adata.raw = None
# or
del adata.raw
Upon slicing an AnnData object along the obs (row) axis, :attr:`raw`
is also sliced. Slicing an AnnData object along the vars (columns) axis
leaves :attr:`raw` unaffected. Note that you can call::
adata.raw[:, 'orig_variable_name'].X
to retrieve the data associated with a variable that might have been
filtered out or "compressed away" in :attr:`X`.
"""
return self._raw
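# A minimal usage sketch of the behaviour documented above (the variable
# names are hypothetical):
#
#     adata.raw = adata                               # freeze current X and var
#     adata = adata[:, kept_var_names]                # slicing vars leaves .raw intact
#     orig = adata.raw[:, "orig_variable_name"].X     # still retrievable afterwards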
@raw.setter
def raw(self, value: "AnnData"):
if value is None:
del self.raw
elif not isinstance(value, AnnData):
raise ValueError("Can only init raw attribute with an AnnData object.")
else:
if self.is_view:
self._init_as_actual(self.copy())
self._raw = Raw(value)
@raw.deleter
def raw(self):
if self.is_view:
self._init_as_actual(self.copy())
self._raw = None
@property
def n_obs(self) -> int:
"""Number of observations."""
return self._n_obs
@property
def n_vars(self) -> int:
"""Number of variables/features."""
return self._n_vars
def _set_dim_df(self, value: pd.DataFrame, attr: str):
if not isinstance(value, pd.DataFrame):
raise ValueError(f"Can only assign pd.DataFrame to {attr}.")
value_idx = self._prep_dim_index(value.index, attr)
if self.is_view:
self._init_as_actual(self.copy())
setattr(self, f"_{attr}", value)
self._set_dim_index(value_idx, attr)
def _prep_dim_index(self, value, attr: str) -> pd.Index:
"""Prepares index to be uses as obs_names or var_names for AnnData object.AssertionError
If a pd.Index is passed, this will use a reference, otherwise a new index object is created.
"""
if self.shape[attr == "var"] != len(value):
raise ValueError(
f"Length of passed value for {attr}_names is {len(value)}, but this AnnData has shape: {self.shape}"
)
if isinstance(value, pd.Index) and not isinstance(
value.name, (str, type(None))
):
raise ValueError(
f"AnnData expects .{attr}.index.name to be a string or None, "
f"but you passed a name of type {type(value.name).__name__!r}"
)
else:
value = pd.Index(value)
if not isinstance(value.name, (str, type(None))):
value.name = None
# fmt: off
if (
not isinstance(value, pd.RangeIndex)
and not infer_dtype(value) in ("string", "bytes")
):
sample = list(value[: min(len(value), 5)])
warnings.warn(dedent(
f"""
AnnData expects .{attr}.index to contain strings, but got values like:
{sample}
Inferred to be: {infer_dtype(value)}
"""
), # noqa
stacklevel=2,
)
# fmt: on
return value
def _set_dim_index(self, value: pd.Index, attr: str):
# Assumes _prep_dim_index has been run
if self.is_view:
self._init_as_actual(self.copy())
getattr(self, attr).index = value
for v in getattr(self, f"{attr}m").values():
if isinstance(v, pd.DataFrame):
v.index = value
@property
def obs(self) -> pd.DataFrame:
"""One-dimensional annotation of observations (`pd.DataFrame`)."""
return self._obs
@obs.setter
def obs(self, value: pd.DataFrame):
self._set_dim_df(value, "obs")
@obs.deleter
def obs(self):
self.obs = pd.DataFrame(index=self.obs_names)
@property
def obs_names(self) -> pd.Index:
"""Names of observations (alias for `.obs.index`)."""
return self.obs.index
@obs_names.setter
def obs_names(self, names: Sequence[str]):
names = self._prep_dim_index(names, "obs")
self._set_dim_index(names, "obs")
@property
def var(self) -> pd.DataFrame:
"""One-dimensional annotation of variables/ features (`pd.DataFrame`)."""
return self._var
@var.setter
def var(self, value: pd.DataFrame):
self._set_dim_df(value, "var")
@var.deleter
def var(self):
self.var = pd.DataFrame(index=self.var_names)
@property
def var_names(self) -> pd.Index:
"""Names of variables (alias for `.var.index`)."""
return self.var.index
@var_names.setter
def var_names(self, names: Sequence[str]):
names = self._prep_dim_index(names, "var")
self._set_dim_index(names, "var")
@property
def uns(self) -> MutableMapping:
"""Unstructured annotation (ordered dictionary)."""
uns = self._uns
if self.is_view:
uns = DictView(uns, view_args=(self, "_uns"))
uns = _overloaded_uns(self, uns)
return uns
@uns.setter
def uns(self, value: MutableMapping):
if not isinstance(value, MutableMapping):
raise ValueError(
"Only mutable mapping types (e.g. dict) are allowed for `.uns`."
)
if isinstance(value, (OverloadedDict, DictView)):
value = value.copy()
if self.is_view:
self._init_as_actual(self.copy())
self._uns = value
@uns.deleter
def uns(self):
self.uns = OrderedDict()
@property
def obsm(self) -> Union[AxisArrays, AxisArraysView]:
"""\
Multi-dimensional annotation of observations
(mutable structured :class:`~numpy.ndarray`).
Stores for each key a two or higher-dimensional :class:`~numpy.ndarray`
of length `n_obs`.
Is sliced with `data` and `obs` but behaves otherwise like a :term:`mapping`.
"""
return self._obsm
@obsm.setter
def obsm(self, value):
obsm = AxisArrays(self, 0, vals=convert_to_dict(value))
if self.is_view:
self._init_as_actual(self.copy())
self._obsm = obsm
@obsm.deleter
def obsm(self):
self.obsm = dict()
@property
def varm(self) -> Union[AxisArrays, AxisArraysView]:
"""\
Multi-dimensional annotation of variables/features
(mutable structured :class:`~numpy.ndarray`).
Stores for each key a two or higher-dimensional :class:`~numpy.ndarray`
of length `n_vars`.
Is sliced with `data` and `var` but behaves otherwise like a :term:`mapping`.
"""
return self._varm
@varm.setter
def varm(self, value):
varm = AxisArrays(self, 1, vals=convert_to_dict(value))
if self.is_view:
self._init_as_actual(self.copy())
self._varm = varm
@varm.deleter
def varm(self):
self.varm = dict()
@property
def obsp(self) -> Union[PairwiseArrays, PairwiseArraysView]:
"""\
Pairwise annotation of observations,
a mutable mapping with array-like values.
Stores for each key a two or higher-dimensional :class:`~numpy.ndarray`
whose first two dimensions are of length `n_obs`.
Is sliced with `data` and `obs` but behaves otherwise like a :term:`mapping`.
"""
return self._obsp
@obsp.setter
def obsp(self, value):
obsp = PairwiseArrays(self, 0, vals=convert_to_dict(value))
if self.is_view:
self._init_as_actual(self.copy())
self._obsp = obsp
@obsp.deleter
def obsp(self):
self.obsp = dict()
@property
def varp(self) -> Union[PairwiseArrays, PairwiseArraysView]:
"""\
Pairwise annotation of variables/features,
a mutable mapping with array-like values.
Stores for each key a two or higher-dimensional :class:`~numpy.ndarray`
whose first two dimensions are of length `n_var`.
Is sliced with `data` and `var` but behaves otherwise like a :term:`mapping`.
"""
return self._varp
@varp.setter
def varp(self, value):
varp = PairwiseArrays(self, 1, vals=convert_to_dict(value))
if self.is_view:
self._init_as_actual(self.copy())
self._varp = varp
@varp.deleter
def varp(self):
self.varp = dict()
def obs_keys(self) -> List[str]:
"""List keys of observation annotation :attr:`obs`."""
return self._obs.keys().tolist()
def var_keys(self) -> List[str]:
"""List keys of variable annotation :attr:`var`."""
return self._var.keys().tolist()
def obsm_keys(self) -> List[str]:
"""List keys of observation annotation :attr:`obsm`."""
return list(self._obsm.keys())
def varm_keys(self) -> List[str]:
"""List keys of variable annotation :attr:`varm`."""
return list(self._varm.keys())
def uns_keys(self) -> List[str]:
"""List keys of unstructured annotation."""
return sorted(list(self._uns.keys()))
@property
def isbacked(self) -> bool:
"""`True` if object is backed on disk, `False` otherwise."""
return self.filename is not None
@property
def is_view(self) -> bool:
"""`True` if object is view of another AnnData object, `False` otherwise."""
return self._is_view
@property
def filename(self) -> Optional[Path]:
"""\
Change to backing mode by setting the filename of a `.h5ad` file.
- Setting the filename writes the stored data to disk.
- Setting the filename when the filename was previously another name
moves the backing file from the previous file to the new file.
If you want to copy the previous file, use `copy(filename='new_filename')`.
"""
return self.file.filename
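# A minimal sketch of switching modes as documented above (the file path is
# hypothetical):
#
#     adata.filename = "data.h5ad"   # writes the data to disk, switches to backed mode
#     adata.filename = None          # loads the data back into memory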
@filename.setter
def filename(self, filename: Optional[PathLike]):
# convert early for later comparison
filename = None if filename is None else Path(filename)
# change from backing-mode back to full loading into memory
if filename is None:
if self.filename is not None:
self.file._to_memory_mode()
else:
# both filename and self.filename are None
# do nothing
return
else:
if self.filename is not None:
if self.filename != filename:
# write the content of self to the old file
# and close the file
self.write()
self.filename.rename(filename)
else:
# do nothing
return
else:
# change from memory to backing-mode
# write the content of self to disk
self.write(filename, force_dense=True)
# open new file for accessing
self.file.open(filename, "r+")
# as the data is stored on disk, we can safely set self._X to None
self._X = None
def _set_backed(self, attr, value):
from .._io.utils import write_attribute
write_attribute(self.file._file, attr, value)
def _normalize_indices(self, index: Optional[Index]) -> Tuple[slice, slice]:
return _normalize_indices(index, self.obs_names, self.var_names)
# TODO: this is not quite complete...
def __delitem__(self, index: Index):
obs, var = self._normalize_indices(index)
# TODO: does this really work?
if not self.isbacked:
del self._X[obs, var]
else:
X = self.file["X"]
del X[obs, var]
self._set_backed("X", X)
if var == slice(None):
del self._obs.iloc[obs, :]
if obs == slice(None):
del self._var.iloc[var, :]
def __getitem__(self, index: Index) -> "AnnData":
"""Returns a sliced view of the object."""
oidx, vidx = self._normalize_indices(index)
return AnnData(self, oidx=oidx, vidx=vidx, asview=True)
def _remove_unused_categories(self, df_full, df_sub, uns):
for k in df_full:
if not
|
is_categorical_dtype(df_full[k])
|
pandas.api.types.is_categorical_dtype
|
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
# from http://imachordata.com/2016/02/05/you-complete-me/
@pytest.fixture
def df1():
return pd.DataFrame(
{
"Year": [1999, 2000, 2004, 1999, 2004],
"Taxon": [
"Saccharina",
"Saccharina",
"Saccharina",
"Agarum",
"Agarum",
],
"Abundance": [4, 5, 2, 1, 8],
}
)
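# Hedged sketch of what the pyjanitor `complete` method tested below does: it
# exposes implicitly missing rows by building all combinations of the given
# columns, e.g.
#
#     df1.complete(columns=["Year", "Taxon"], fill_value={"Abundance": 0})
#
# yields one row per (Year, Taxon) pair, with Abundance 0 where the original
# frame had no observation (see the expected outputs further down).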
def test_empty_column(df1):
"""Return dataframe if `columns` is empty."""
assert_frame_equal(df1.complete(), df1)
def test_MultiIndex_column(df1):
"""Raise ValueError if column is a MultiIndex."""
df = df1
df.columns = [["A", "B", "C"], list(df.columns)]
with pytest.raises(ValueError):
df1.complete(["Year", "Taxon"])
def test_column_duplicated(df1):
"""Raise ValueError if column is duplicated in `columns`"""
with pytest.raises(ValueError):
df1.complete(
columns=[
"Year",
"Taxon",
{"Year": lambda x: range(x.Year.min().x.Year.max() + 1)},
]
)
def test_type_columns(df1):
"""Raise error if columns is not a list object."""
with pytest.raises(TypeError):
df1.complete(columns="Year")
def test_fill_value_is_a_dict(df1):
"""Raise error if fill_value is not a dictionary"""
with pytest.raises(TypeError):
df1.complete(columns=["Year", "Taxon"], fill_value=0)
def test_wrong_column_fill_value(df1):
"""Raise ValueError if column in `fill_value` does not exist."""
with pytest.raises(ValueError):
df1.complete(columns=["Taxon", "Year"], fill_value={"year": 0})
def test_wrong_data_type_dict(df1):
"""
Raise ValueError if value in dictionary
is not a 1-dimensional object.
"""
with pytest.raises(ValueError):
df1.complete(columns=[{"Year": pd.DataFrame([2005, 2006, 2007])}])
frame = pd.DataFrame(
{
"Year": [1999, 2000, 2004, 1999, 2004],
"Taxon": [
"Saccharina",
"Saccharina",
"Saccharina",
"Agarum",
"Agarum",
],
"Abundance": [4, 5, 2, 1, 8],
}
)
wrong_columns = (
(frame, ["b", "Year"]),
(frame, [{"Yayay": range(7)}]),
(frame, ["Year", ["Abundant", "Taxon"]]),
(frame, ["Year", ("Abundant", "Taxon")]),
)
empty_sub_columns = [
(frame, ["Year", []]),
(frame, ["Year", {}]),
(frame, ["Year", ()]),
]
@pytest.mark.parametrize("frame,wrong_columns", wrong_columns)
def test_wrong_columns(frame, wrong_columns):
"""Test that ValueError is raised if wrong column is supplied."""
with pytest.raises(ValueError):
frame.complete(columns=wrong_columns)
@pytest.mark.parametrize("frame,empty_sub_cols", empty_sub_columns)
def test_empty_subcols(frame, empty_sub_cols):
"""Raise ValueError for an empty group in columns"""
with pytest.raises(ValueError):
frame.complete(columns=empty_sub_cols)
def test_fill_value(df1):
"""Test fill_value argument."""
output1 = pd.DataFrame(
{
"Year": [1999, 1999, 2000, 2000, 2004, 2004],
"Taxon": [
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
],
"Abundance": [1, 4.0, 0, 5, 8, 2],
}
)
result = df1.complete(
columns=["Year", "Taxon"], fill_value={"Abundance": 0}
)
assert_frame_equal(result, output1)
@pytest.fixture
def df1_output():
return pd.DataFrame(
{
"Year": [
1999,
1999,
2000,
2000,
2001,
2001,
2002,
2002,
2003,
2003,
2004,
2004,
],
"Taxon": [
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
],
"Abundance": [1.0, 4, 0, 5, 0, 0, 0, 0, 0, 0, 8, 2],
}
)
def test_fill_value_all_years(df1, df1_output):
"""
Test that the complete function expands the data to cover
all the years from 1999 to 2004.
"""
result = df1.complete(
columns=[
{"Year": lambda x: range(x.Year.min(), x.Year.max() + 1)},
"Taxon",
],
fill_value={"Abundance": 0},
)
assert_frame_equal(result, df1_output)
def test_dict_series(df1, df1_output):
"""
Test the complete function if a dictionary containing a Series
is present in `columns`.
"""
result = df1.complete(
columns=[
{
"Year": lambda x: pd.Series(
range(x.Year.min(), x.Year.max() + 1)
)
},
"Taxon",
],
fill_value={"Abundance": 0},
)
assert_frame_equal(result, df1_output)
def test_dict_series_duplicates(df1, df1_output):
"""
Test the complete function if a dictionary containing a
Series (with duplicates) is present in `columns`.
"""
result = df1.complete(
columns=[
{
"Year": pd.Series(
[1999, 2000, 2000, 2001, 2002, 2002, 2002, 2003, 2004]
)
},
"Taxon",
],
fill_value={"Abundance": 0},
)
assert_frame_equal(result, df1_output)
def test_dict_values_outside_range(df1):
"""
Test the output if a dictionary is present,
and none of the values in the dataframe,
for the corresponding label, is present
in the dictionary's values.
"""
result = df1.complete(
columns=[("Taxon", "Abundance"), {"Year": np.arange(2005, 2007)}]
)
expected = pd.DataFrame(
[
{"Taxon": "Agarum", "Abundance": 1, "Year": 1999},
{"Taxon": "Agarum", "Abundance": 1, "Year": 2005},
{"Taxon": "Agarum", "Abundance": 1, "Year": 2006},
{"Taxon": "Agarum", "Abundance": 8, "Year": 2004},
{"Taxon": "Agarum", "Abundance": 8, "Year": 2005},
{"Taxon": "Agarum", "Abundance": 8, "Year": 2006},
{"Taxon": "Saccharina", "Abundance": 2, "Year": 2004},
{"Taxon": "Saccharina", "Abundance": 2, "Year": 2005},
{"Taxon": "Saccharina", "Abundance": 2, "Year": 2006},
{"Taxon": "Saccharina", "Abundance": 4, "Year": 1999},
{"Taxon": "Saccharina", "Abundance": 4, "Year": 2005},
{"Taxon": "Saccharina", "Abundance": 4, "Year": 2006},
{"Taxon": "Saccharina", "Abundance": 5, "Year": 2000},
{"Taxon": "Saccharina", "Abundance": 5, "Year": 2005},
{"Taxon": "Saccharina", "Abundance": 5, "Year": 2006},
]
)
|
assert_frame_equal(result, expected)
|
pandas.testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 21 00:04:24 2021
@author: bernifoellmer
"""
import os, sys
import numpy as np
import pandas as pd
import time
import pydicom
from glob import glob
from tqdm import tqdm
def computeSliceSpacing(alldcm):
try:
if len(alldcm)>1:
ds0 = pydicom.dcmread(alldcm[0], force = False, defer_size = 256, specific_tags = ['SliceLocation'], stop_before_pixels = True)
value0 = float(ds0.data_element('SliceLocation').value)
ds1 = pydicom.dcmread(alldcm[1], force = False, defer_size = 256, specific_tags = ['SliceLocation'], stop_before_pixels = True)
value1 = float(ds1.data_element('SliceLocation').value)
SliceSpacing = abs(value1-value0)
else:
SliceSpacing = -1.0
except Exception as why:
SliceSpacing = -1.0
return SliceSpacing
def extractMissingSeries(settings, SeriesInstanceUIDList):
df_dicom_missing = pd.DataFrame(columns=['StudyInstanceUID', 'SeriesInstanceUID'])
study_uids = os.listdir(settings['fp_images'])
pbar = tqdm(total=len(study_uids))
pbar.set_description("Collect missing series")
for study_uid in study_uids:
pbar.update(1)
series_uids = os.listdir(os.path.join(settings['fp_images'], study_uid))
for series_uid in series_uids:
if series_uid not in SeriesInstanceUIDList:
df_dicom_missing = df_dicom_missing.append(dict({'StudyInstanceUID':study_uid, 'SeriesInstanceUID':series_uid}), ignore_index=True)
pbar.close()
return df_dicom_missing
def extractDICOM_sql(settings, StudyInstanceUID, SeriesInstanceUID):
path_series = os.path.join(settings['fp_images'], StudyInstanceUID, SeriesInstanceUID)
alldcm = glob(path_series + '/*.dcm')
ds = pydicom.dcmread(alldcm[0], force = False, defer_size = 256, specific_tags = ['NumberOfFrames'], stop_before_pixels = True)
try:
NumberOfFrames = ds.data_element('NumberOfFrames').value
MultiSlice = True
except:
NumberOfFrames=''
MultiSlice = False
#print('except0:')
specific_tags = settings['dicom_tags']
specific_tags_dcm = specific_tags.copy()
if 'Site' in specific_tags_dcm: specific_tags_dcm.remove('Site')
if 'Count' in specific_tags_dcm: specific_tags_dcm.remove('Count')
if 'SliceSpacing' in specific_tags_dcm: specific_tags_dcm.remove('SliceSpacing')
tags = dict.fromkeys(specific_tags)
if MultiSlice:
for dcm in alldcm[0:1]:
try:
ds = pydicom.dcmread(dcm, force = False, defer_size = 256, specific_tags = specific_tags_dcm, stop_before_pixels = True)
except Exception as why:
#print('Exception:', why)
print('StudyInstanceUID:', StudyInstanceUID)
print('SeriesInstanceUID:', SeriesInstanceUID)
tags['StudyInstanceUID'] = StudyInstanceUID
tags['SeriesInstanceUID'] = SeriesInstanceUID
continue
if 'Site' in specific_tags:
tags['Site'] = 'P'+ str(ds.PatientID).split('-')[0]
if 'Count' in specific_tags:
tags['Count'] = len(alldcm)
if 'SliceSpacing' in specific_tags:
tags['SliceSpacing'] = -1
for tag in specific_tags:
try:
data_element = ds.data_element(tag)
except:
#print('except1')
continue
if data_element is None:
continue
tags[tag] = str(data_element.value)
else:
try:
ds = pydicom.dcmread(alldcm[0], force = False, defer_size = 256, specific_tags = specific_tags_dcm, stop_before_pixels = True)
except Exception as why:
print('StudyInstanceUID:', StudyInstanceUID)
print('SeriesInstanceUID:', SeriesInstanceUID)
tags['StudyInstanceUID'] = StudyInstanceUID
tags['SeriesInstanceUID'] = SeriesInstanceUID
if 'Site' in specific_tags:
tags['Site'] = 'P'+ str(ds.PatientID).split('-')[0]
if 'Count' in specific_tags:
tags['Count'] = len(alldcm)
if 'SliceSpacing' in specific_tags:
#print('test02')
SliceSpacing = computeSliceSpacing(alldcm)
tags['SliceSpacing'] = SliceSpacing
# Extract tags if exist
for tag in specific_tags:
#print('found0', tag)
try:
data_element = ds.data_element(tag)
except:
continue
if data_element is None:
continue
tags[tag] = str(data_element.value)
# Replace None
for key, value in tags.items():
if value == 'None':
tags[key] = None
return tags
def extractDICOMTags(settings, SeriesInstaceUIDList, NumSamples=None):
root = settings['folderpath_discharge']
fout = settings['filepath_dicom']
specific_tags = settings['dicom_tags']
cols_first=[]
study_uids = os.listdir(root)
df =
|
pd.DataFrame(columns=specific_tags)
|
pandas.DataFrame
|
import numpy as np
import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
NaT,
Series,
TimedeltaIndex,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestShift:
@pytest.mark.parametrize(
"ser",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, ser, shift_size):
# GH22397
assert ser.shift(shift_size) is not ser
@pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1min")])
def test_datetime_shift_always_copy(self, move_by_freq):
# GH#22397
ser = Series(range(5), index=date_range("2017", periods=5))
assert ser.shift(freq=move_by_freq) is not ser
def test_shift(self, datetime_series):
shifted = datetime_series.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, datetime_series.index)
tm.assert_index_equal(unshifted.index, datetime_series.index)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_series.values[:-1]
)
offset = BDay()
shifted = datetime_series.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
unshifted = datetime_series.shift(0, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
shifted = datetime_series.shift(1, freq="B")
unshifted = shifted.shift(-1, freq="B")
tm.assert_series_equal(unshifted, datetime_series)
# corner case
unshifted = datetime_series.shift(0)
tm.assert_series_equal(unshifted, datetime_series)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, BDay())
tm.assert_series_equal(shifted2, shifted3)
tm.assert_series_equal(ps, shifted2.shift(-1, "B"))
msg = "Given freq D does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=BDay())
tm.assert_series_equal(shifted5, shifted4)
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
s1 = Series(np.arange(5, dtype=dtype), index=index)
p = s1.iloc[1]
result = s1.shift(periods=p)
expected = Series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_series_equal(result, expected)
# GH#8260
# with tz
s = Series(
date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"
)
result = s - s.shift()
exp = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
tm.assert_series_equal(result, exp)
# incompat tz
s2 = Series(date_range("2000-01-01 09:00:00", periods=5, tz="CET"), name="foo")
msg = "DatetimeArray subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
s - s2
def test_shift2(self):
ts = Series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
idx =
|
DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"])
|
pandas.DatetimeIndex
|
from typing import Dict, Tuple, List, Optional
import numpy as np
import pandas as pd
from wiseml.feature_engineering.ohlc.base import OHLCFeatureEngineer
class OHLCStandardFeatureEngineer(OHLCFeatureEngineer):
default_period_list = [10, 30, 60]
default_ma_window_list = [10, 80, 120] # default moving average period list
default_z_score_rolling_window = 1440 # one day
def __init__(self, period: Tuple[str, str] = ('1', 'min'), period_list: Optional[List[int]] = None,
moving_average_window_list: Optional[List[int]] = None, z_score_rolling_window: Optional[int] = None,
*args, **kwargs):
"""
:param period: tuple that describes the period of one observation in the data (1 min or 1000 volume, for example)
:param period_list: list of periods for creating features based on the n-previous observation period
(n is a value from the period list)
:param moving_average_window_list: list of periods (number of observations) for creating moving-average features
:param z_score_rolling_window: number of previous observations used for z-scoring (we can't take info from the future)
"""
super().__init__(*args, **kwargs)
self.features: pd.DataFrame =
|
pd.DataFrame(index=self._data.index)
|
pandas.DataFrame
|
from datetime import datetime, timedelta
import matplotlib
import pandas as pd
from flask import flash, render_template, url_for, redirect, session
from plotnine import *
from app import app, db
from app.apputils import Const, Params, DTime, FileUtils
from app.database.dbutils import DbCourse, DbStudent, DbForm, Db, Dashboard
from app.database.models import Course, Student
from app.forms import CourseCreateForm, CourseDeleteForm, StudentCreateForm, StudentDeleteForm, SpreadsheetSelect, \
SheetsSelect, InitForm, DashboardForm
# https://stackoverflow.com/questions/27147300/matplotlib-tcl-asyncdelete-async-handler-deleted-by-the-wrong-thread
matplotlib.use("Agg")
@app.route("/")
@app.route("/index")
def index():
success = Db.exists()
if not success:
return redirect(url_for("basesetup"))
return render_template("index.html")
@app.route("/base_setup", methods=["GET", "POST"])
def basesetup():
form = InitForm()
if form.validate_on_submit():
try:
db.create_all()
message = f"Succès : La base a été initialisée"
except Exception as ex:
message = f"Erreur : Impossible d'initialiser la base"
flash(message)
return redirect(url_for("index"))
else:
success = Db.exists()
if success:
return redirect(url_for("index"))
return render_template("base_setup.html", form=form)
@app.route("/courses", methods=["GET"])
def courses():
courses_list = db.session.query(Course).order_by(Course.startdate).all()
return render_template("courses.html", courses=courses_list)
@app.route("/course/create", methods=["GET", "POST"])
def course_create():
form = CourseCreateForm()
if form.validate_on_submit():
success, message = DbCourse.insert(
label=form.label.data,
startdate=form.startdate.data,
enddate=form.enddate.data,
fileid=form.fileid.data
)
flash(message)
if success:
return redirect(url_for("courses"))
return render_template("course_create.html", form=form)
@app.route("/course/delete", methods=["GET", "POST"])
def course_delete():
courses_list = db.session.query(Course).order_by(Course.startdate).all()
form = CourseDeleteForm()
form.course.choices = []
for course in courses_list:
displaytext = f"{course.label} du {DTime.formatdate(course.startdate)} au " \
f"{DTime.formatdate(course.enddate)}, fichier {course.filename}"
form.course.choices.append((str(course.id), displaytext))
if form.validate_on_submit():
success, message = DbCourse.delete(courseid=form.course.data)
flash(message)
if success:
return redirect("/courses")
return render_template("course_delete.html", form=form)
@app.route("/students", methods=["GET"])
def students():
students_list = db.session.query(Student).order_by(
Student.course_id, Student.id
).all()
return render_template("students.html", students=students_list)
@app.route("/student/create", methods=["GET", "POST"])
def student_create():
courses_list = db.session.query(Course).all()
form = StudentCreateForm()
form.course.choices = []
for course in courses_list:
displaytext = f"{course.label} du {DTime.formatdate(course.startdate)} au " \
f"{DTime.formatdate(course.enddate)}, fichier {course.filename}"
form.course.choices.append((str(course.id), displaytext))
if form.validate_on_submit():
success, message = DbStudent.insert(
lastname=form.lastname.data,
firstname=form.firstname.data,
email=form.email.data,
course_id=form.course.data
)
flash(message)
if success:
return redirect(url_for("students"))
return render_template("student_create.html", form=form)
@app.route("/student/delete", methods=["GET", "POST"])
def student_delete():
students_list = db.session.query(Student).order_by(
Student.course_id, Student.lastname, Student.firstname
)
form = StudentDeleteForm()
form.student.choices = []
for student in students_list:
displaytext = f"Etudiant(e) {student.firstname} {student.lastname} (email {student.email}, " \
f"formation {student.course.label})"
form.student.choices.append((str(student.id), displaytext))
if form.validate_on_submit():
success, message = DbStudent.delete(studentid=form.student.data)
flash(message)
if success:
return redirect("/students")
return render_template("student_delete.html", form=form)
@app.route("/spreadsheets", methods=["GET", "POST"])
def spreadsheets():
form = SpreadsheetSelect()
if form.validate_on_submit():
if form.enddate.data is None:
flash("Erreur : Aucune date choisie, utilisation d'une valeur par défaut")
else:
delta = (datetime.now().date() - form.enddate.data)
newmaxdays = DTime.timedelta2days(delta)
session[Const.MAX_DAYS_TO_ENDDATE] = newmaxdays
return redirect(url_for("sheets"))
else:
maxdays = Params.getsessionvar(name=Const.MAX_DAYS_TO_ENDDATE,
default=Const.DEFAULT_DAYS_TO_ENDDATE)
minenddate = datetime.now() - timedelta(days=int(maxdays))
form.enddate.data = minenddate
okcourses = DbCourse.querycourses(minenddate=form.enddate.data)
return render_template("spreadsheets.html", courses=okcourses, form=form)
@app.route("/sheets", methods=["GET", "POST"])
def sheets():
form = SheetsSelect()
maxdays = Params.getsessionvar(name=Const.MAX_DAYS_TO_ENDDATE,
default=Const.DEFAULT_DAYS_TO_ENDDATE)
minenddate = datetime.now() - timedelta(days=int(maxdays))
if form.validate_on_submit():
# Saving number of days for friendliness, failure unimportant
if form.daysnochange.data is None:
daysnochange = Params.getsessionvar(name=Const.MAX_DAYS_SHEET_UNCHANGED,
default=Const.DEFAULT_DAYS_UNCHANGED)
flash("Erreur : Aucune durée choisie, utilisation d'une valeur par défaut")
else:
session[Const.MAX_DAYS_SHEET_UNCHANGED] = form.daysnochange.data
daysnochange = int(form.daysnochange.data)
success = DbForm.updateall(minenddate=minenddate, daysnochange=daysnochange)
if not success:
flash("Attention : Des erreurs dans la mise à jour, certaines réponses non mises à jour")
return redirect(url_for("sheets"))
else:
daysnochange = Params.getsessionvar(name=Const.MAX_DAYS_SHEET_UNCHANGED,
default=Const.DEFAULT_DAYS_UNCHANGED)
form.daysnochange.data = daysnochange
oksheets = DbForm.queryforms(minenddate=minenddate, daysnochange=form.daysnochange.data)
return render_template("sheets.html", gforms=oksheets, form=form)
@app.route("/dashboard", methods=["GET", "POST"])
def dashboard():
# TODO check dates coherent
form = DashboardForm()
form.courses.choices = []
form.students.choices = []
for course in db.session.query(Course).all():
displaytext = f"{course.label} du {DTime.formatdate(course.startdate)} au " \
f"{DTime.formatdate(course.enddate)}, fichier {course.filename}"
form.courses.choices.append((str(course.id), displaytext))
for student in db.session.query(Student).all():
displaytext = f"{student.firstname} {student.lastname} (email {student.email}, " \
f"formation {student.course.label})"
form.students.choices.append((str(student.id), displaytext))
if form.validate_on_submit():
session["DASHBOARD_COURSE_IDS"] = [int(x) for x in form.courses.data]
session["DASHBOARD_STUDENT_IDS"] = [int(x) for x in form.students.data]
session["DASHBOARD_STARTDATE"] = DTime.datetimeencode(form.startdate.data)
session["DASHBOARD_ENDDATE"] = DTime.datetimeencode(form.enddate.data)
return redirect(url_for("dashboard_analyze"))
else:
form.startdate.data = DTime.min()
form.enddate.data = DTime.max()
return render_template("dashboard.html", form=form)
@app.route("/dashboard/analyze")
def dashboard_analyze():
dashbrd = Dashboard.querycriteria()
# display choices in readonly form fields as a reminder
form = DashboardForm()
form.courses.render_kw = {"readonly": True}
form.students.render_kw = {"readonly": True}
form.startdate.render_kw = {"readonly": True}
form.enddate.render_kw = {"readonly": True}
form.courses.choices = []
form.students.choices = []
for course in dashbrd.courses_list:
displaytext = f"{course.label} du {DTime.formatdate(course.startdate)} au " \
f"{DTime.formatdate(course.enddate)}, fichier {course.filename}"
form.courses.choices.append((str(course.id), displaytext))
for student in dashbrd.students_list:
displaytext = f"{student.firstname} {student.lastname} (email {student.email}, " \
f"formation {student.course.label})"
form.students.choices.append((str(student.id), displaytext))
form.startdate.data = dashbrd.startdate.date()
form.enddate.data = dashbrd.enddate.date()
# prepare num graphs
numgraphpaths = list()
numanswers = dashbrd.querynumanswers()
curtime = datetime.now(tz=None)
suffix = f"{curtime.hour}{curtime.minute}{curtime.second}"
for qindex, numanswer in enumerate(numanswers):
data =
|
pd.DataFrame({"x": numanswer.grades})
|
pandas.DataFrame
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Dict, Type, Union, Optional, Sequence, Callable
import numpy as np
import pandas as pd
from kats.consts import Params
from kats.consts import _log_error, TimeSeriesData
from kats.detectors.detector import DetectorModel
from kats.models.model import Model
ArrayLike = Union[np.ndarray, Sequence[float], pd.Series]
@dataclass
class EvaluationObject:
input_data: Optional[Union[ArrayLike, TimeSeriesData, pd.DataFrame]]
model: Optional[Union[Model, DetectorModel]]
preds: Optional[ArrayLike]
labels: Optional[ArrayLike]
results: Optional[pd.DataFrame]
class Evaluator(ABC):
def __init__(self):
self.runs: Dict[str, EvaluationObject] = {}
def create_evaluation_run(self, run_name: str) -> None:
self._check_if_valid_run_name(run_name=run_name, should_exist=False)
self.runs[run_name] = EvaluationObject(None, None, None, None, None)
def delete_evaluation_run(self, run_name: str):
self._check_if_valid_run_name(run_name=run_name)
del self.runs[run_name]
def _check_if_valid_run_name(
self, run_name: str, should_exist: bool = True
) -> None:
if not isinstance(run_name, str): # Check if name is a string
msg = f"Name of evaluation run must be a string, but is of type {type(run_name)}."
raise _log_error(msg)
# Handle case depending on if run_name should exist or not
if should_exist != (run_name in self.runs):
msg = f"Run name {run_name} {'already exists' if run_name in self.runs else 'does not exist'}. Please choose a valid run name."
raise _log_error(msg)
def get_evaluation_run(self, run_name: str) -> EvaluationObject:
self._check_if_valid_run_name(run_name=run_name) # Check if valid run
return self.runs[run_name]
@abstractmethod
def generate_predictions(
self,
run_name: str,
model: Type,
model_params: Optional[Union[Params, Dict[str, float]]],
tune_params=False,
) -> ArrayLike:
pass
def evaluate(
self,
run_name: str,
metric_to_func: Dict[str, Callable],
labels: ArrayLike,
) -> pd.DataFrame:
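# `metric_to_func` maps a metric name to a callable taking (predictions, labels)
# and returning the metric value(s); each result is wrapped in a one-element
# list so that the final DataFrame has a single row.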
# Check preconditions
self._check_if_valid_run_name(run_name=run_name) # Check if valid run
evaluator = self.runs[run_name]
if np.ndim(evaluator.preds) < 1: # Check if predictions are a valid type
msg = f"Invalid type: {type(labels)} for predictions. Must be an iterable object."
raise _log_error(msg)
if np.ndim(labels) < 1: # Check if labels are a valid type
msg = (
f"Invalid type: {type(labels)} for labels. Must be an iterable object."
)
raise _log_error(msg)
evaluator.labels = labels
# pyre-fixme[6]: Incompatible parameter type...
if len(evaluator.preds) != len(labels): # Check lengths of preds and labels
msg = "Predictions and labels have unequal lengths."
raise _log_error(msg)
# Run evaluation
metric_to_result: Dict[str, ArrayLike] = {}
for metric, func in metric_to_func.items():
try:
metric_vals = func(evaluator.preds, evaluator.labels)
except ValueError as e:
msg = (
f"Error running evaluation for metric {metric}. Full message:\n {e}"
)
raise _log_error(msg)
metric_to_result[metric] = [metric_vals]
# Save and return evaluation results
aggregated_results =
|
pd.DataFrame(metric_to_result)
|
pandas.DataFrame
|
#%%
# Import the necessary packages. The Open Datasets package contains
# a class representing each data source (NycTlcGreen for example)
# to easily filter date parameters before downloading
from azureml.opendatasets import NycTlcGreen
from datetime import datetime
from dateutil.relativedelta import relativedelta
import os
import pandas as pd
#%%
# Create a dataframe to hold the taxi data. When working in a non-
# Spark environment, Open Datasets only allows downloading one month
# of data at a time with certain classes to avoid MemoryError with
# large datasets
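# A rough sketch of that month-by-month pattern, with names and signatures
# assumed from the Azure Open Datasets tutorial (verify against the installed
# azureml-opendatasets version before use):
#
#     start = datetime(2016, 1, 1)
#     for offset in range(12):
#         month_df = NycTlcGreen(
#             start + relativedelta(months=offset),
#             start + relativedelta(months=offset + 1) - relativedelta(days=1),
#         ).to_pandas_dataframe()
#         green_taxi_df = pd.concat([green_taxi_df, month_df.sample(2000)])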
green_taxi_df =
|
pd.DataFrame([])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import time
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score
import lightgbm as lgb
from sklearn.ensemble import RandomForestClassifier
#importing the necessary libraries
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
from sklearn.linear_model import LinearRegression
from sklearn.utils import column_or_1d
from sklearn.metrics import accuracy_score
# read the data
data_x =
|
pd.read_csv('../nepal_earthquake_tra.csv')
|
pandas.read_csv
|
import os
import sys
import pandas as pd
args = sys.argv[1:]
directory = args[0]
start_name = args[1]
try:
datasets = args[2].split(",")
except:
datasets = ["rte", "sts-b", "mrpc", "cola"]
files = [x for x in os.listdir(directory) if x.startswith(start_name)]
all_datasets = ["rte", "sts-b", "mrpc", "cola"]
metrics = ["test_acc", "test_pearson", "test_acc", "test_mcc"]
metrics = dict(zip(all_datasets, metrics))
pd_hold = []
for dataset in datasets:
metric = metrics[dataset]
for run in [x for x in files if "DATASET_" + dataset in x]:
results = os.path.join(directory, run, "test_best_log.txt")
try:
result =
|
pd.read_csv(results)
|
pandas.read_csv
|
from typing import Any, Callable, Iterable, List, Tuple, Union
from cached_property import cached_property
import pandas as pd
import py2neo
from py2neo import matching
from py2neo import ogm
import numpy as np
import pandas2neo4j
from pandas2neo4j.pandas_model import PandasModel
from pandas2neo4j.errors import (
NodeWithIdDoesNotExistError,
NotSupportedModelClassError,
InvalidArgumentsConfigurationError,
RelationshipDoesNotExistError,
)
class PandasGraph(ogm.Repository):
"""
Class representing the underlying graph.
:class:`.PandasGraph` provides multiple facilities to operate on graph's nodes (mostly wrapped
by :class:`ogm.Model` and :class:`.PandasModel`) using some data stored in `pandas.DataFrame`
tables. One can create :class:`.PandasModel` instances and relationships between nodes based
on rows of given table.
"""
@property
def schema(self) -> py2neo.Schema:
"""
:class:`py2neo.Schema` object of underlying graph.
"""
return self.graph.schema
@cached_property
def _node_matcher(self) -> matching.NodeMatcher:
return matching.NodeMatcher(self.graph)
@cached_property
def _relationship_matcher(self) -> matching.RelationshipMatcher:
return matching.RelationshipMatcher(self.graph)
def create_graph_object(self, subgraph: Union[ogm.Model, py2neo.Entity]):
"""
Push object to remote graph
:param subgraph: either :class:`py2neo.ogm.Model` of :class:`py2neo.Entity` instance.
"""
if hasattr(subgraph, "__node__"):
subgraph = subgraph.__node__
self.graph.create(subgraph)
def create_graph_objects(self, objects: Iterable[Union[ogm.Model, py2neo.Entity]]):
"""
Push collection of objects to remote graph
:param objects: an iterable of either :class:`py2neo.ogm.Model` or :class:`py2neo.Entity` instances.
"""
tx = self.graph.begin()
for obj in objects:
if hasattr(obj, "__node__"):
obj = obj.__node__
tx.create(obj)
tx.commit()
def _get_node_from_model(
self,
model_class: ogm.Model,
id_key: str,
id_value: Any,
) -> py2neo.Node:
del id_key
model_instance = self.get(model_class, id_value)
if model_instance is None:
raise NodeWithIdDoesNotExistError(model_class, id_value)
return model_instance.__node__
def _get_node_from_str(
self,
model_class: str,
id_key: str,
id_value: Any,
) -> py2neo.Node:
return self.graph.nodes.match(model_class, **{id_key: id_value}).first()
def _node_getter(self, model_class: Union[ogm.Model, str]) -> Callable:
if type(model_class) is str:
return self._get_node_from_str
elif issubclass(model_class, ogm.Model):
return self._get_node_from_model
raise NotSupportedModelClassError
def _get_relationship_nodes(
self,
row: pd.Series,
from_model_class: Union[ogm.Model, str],
to_model_class: Union[ogm.Model, str],
from_key_column: str,
to_key_column: str,
from_model_id_key: str = None,
to_model_id_key: str = None,
) -> Tuple[py2neo.Node, py2neo.Node]:
from_node_getter = self._node_getter(from_model_class)
to_node_getter = self._node_getter(to_model_class)
return (
from_node_getter(from_model_class, from_model_id_key, row[from_key_column]),
to_node_getter(to_model_class, to_model_id_key, row[to_key_column]),
)
def _create_relationship(
self,
relationship: str,
from_node: py2neo.Node,
to_node: py2neo.Node,
) -> py2neo.Relationship:
if from_node is None or to_node is None:
raise NodeWithIdDoesNotExistError()
return py2neo.Relationship(from_node, relationship, to_node)
def create_relationships_from_dataframe(
self,
df: pd.DataFrame,
relationship: str,
from_model_class: Union[ogm.Model, str],
to_model_class: Union[ogm.Model, str],
from_key_column: str,
to_key_column: str,
from_model_id_key: str = None,
to_model_id_key: str = None,
chunk_size: int = 0,
) -> pd.Series:
"""
Create relationships of type `relationship` between instances of `from_model_class` and `to_model_class`.
Return a :class:`pandas.Series` of :class:`py2neo.Relationship` objects that represent each relationship
in the table.
Relationships are listed in `df: pandas.DataFrame` where rows contain pairs of ids sufficient to identify
the entities that should be connected. `from_key_column` and `to_key_column` arguments specify names of the
columns that contain these ids. By default :class:`ogm.Model`'s *__primarykey__* is used to identify the
nodes with id values, however other properties can be used by specifying `from_model_id_key`/`to_model_id_key`.
These values are also required if `from_model_class` or `to_model_class` are not provided as a subclass of
`ogm.Model` but a string with node label is provided instead.
`chunk_size` parameter can be used if the `df` table is large. It specifies the maximal number of relationships
that can be created within a single transaction. Note that the `numpy.array_split` function is used to split the
table into chunks, so the size of each part may differ from the number specified in `chunk_size`.
:param df: A table with relationships key pairs. Each row should contain ids of already existing nodes in
`from_key_column` and `to_key_column` columns.
:type df: :class:`pandas.DataFrame`
:param relationship: Name of the relationship that should be created
:type relationship: str
:param from_model_class: Either :class:`ogm.Model` subclass (e.g. subclass of :class:`.PandasModel`)
or `str` with class name/label of :class:`py2neo.ogm.Model`/:class:`py2neo.Node` instances that
should be starting nodes of each relationship.
:type from_model_class: Union[:class:`ogm.Model`, str]
:param to_model_class: Either :class:`ogm.Model` subclass (e.g. subclass of :class:`.PandasModel`)
or `str` with class name/label of :class:`py2neo.ogm.Model`/:class:`py2neo.Node` instances that
should be ending nodes of each relationship.
:type to_model_class: Union[:class:`ogm.Model`, str]
:param from_key_column: Name of the column in `df` table containing ids of the relationships starting nodes.
:type from_key_column: str
:param to_key_column: Name of the column in `df` table containing ids of the relationships ending nodes.
:type to_key_column: str
:param from_model_id_key: Name of the property that should be used to identify the node by value specified in
`from_key_column`. If `from_model_class` is a :class:`ogm.Model` subclass the *__primarykey__* is used
by default and this parameter can be omitted.
:type from_model_id_key: str, optional
:param to_model_id_key: Name of the property that should be used to identify the node by value specified in
`to_key_column`. If `to_model_class` is a :class:`ogm.Model` subclass the *__primarykey__* is used
by default and this parameter can be omitted.
:type to_model_id_key: str, optional
:param chunk_size: Maximal number of rows that should be converted into relationships within a single transaction.
:type chunk_size: int, optional
:return: A :class:`pandas.Series` with :class:`py2neo.Relationship` objects for each row in the `df` table.
"""
chunk_num = 1 if chunk_size == 0 else np.ceil(len(df) / chunk_size)
all_relationships = []
for chunk in np.array_split(df, chunk_num):
relationships = chunk.apply(
lambda row: self._create_relationship(
relationship,
*self._get_relationship_nodes(
row,
from_model_class,
to_model_class,
from_key_column,
to_key_column,
from_model_id_key=from_model_id_key,
to_model_id_key=to_model_id_key,
),
),
axis=1,
)
self.create_graph_objects(relationships)
all_relationships.append(relationships)
return
|
pd.concat(all_relationships)
|
pandas.concat
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Ice core AR(n) fits
Created on Wed May 12 14:00:10 2021
@author: lizz
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.ar_model import AutoReg, ar_select_order
## Read in Greenland time series
core_accum_fpath = '/Users/lizz/Documents/GitHub/Data_unsynced/Ice_core_accum/Andersen_etal_2006_Annual_Accumulation_22Mar2011-trimmed.csv'
core_tseries = pd.read_csv(core_accum_fpath, index_col=0, parse_dates=[0])
core_names = core_tseries.columns
series_to_test = core_tseries
## Pre-process data
anomaly_series = series_to_test - series_to_test.mean()
def adf_test(timeseries):
print('A timeseries ready for AR(n) fitting should have an ADF test statistic more negative than the critical value (reject the null hypothesis).')
print ('Results of Dickey-Fuller Test:')
dftest = sm.tsa.stattools.adfuller(timeseries, autolag='AIC')
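# With autolag set, adfuller returns a tuple of (test statistic, p-value,
# number of lags used, number of observations used, dict of critical values,
# icbest); only the first four entries are packed into the Series below.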
dfoutput =
|
pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
|
pandas.Series
|
import pandas as pd
import numpy as np
from plotly.offline import iplot
import plotly.io as pio
from scanpy.plotting._tools.scatterplots import _get_palette
from anndata import AnnData
from typing import Union
def river_plot(
adata: AnnData,
source: str,
target: str,
cell_number: bool = True,
title: str = 'River plot (Sankey Diagram)',
save: Union[str, None] = None,
scale: int = 1
) -> None:
"""
Parameters
----------
adata:
Annotated data matrix.
source
Obs variable containing source annotation.
target
Obs variable containing target annotation.
cell_number
If True, prints the number of cells in each category. Otherwise, it is not displayed.
title
Title for the plot.
save
Save the plot.
scale
Above 1 it increases the resolution; below 1 it reduces it. Only matters when saving the plot.
"""
adata.obs[source] = adata.obs[source].astype('str').astype('category')
adata.obs[target] = adata.obs[target].astype('str').astype('category')
df_nodes, df_links = __tool_sankey(adata, source, target, cell_number=cell_number)
__plot_sankey(df_nodes, df_links,
title=title,
save=save,
scale=scale)
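# A minimal usage sketch (assumes an AnnData object `adata` with categorical
# obs columns; the column names below are hypothetical):
#
#     river_plot(adata, source="sample", target="cell_type", save="river.png")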
def __tool_sankey(adata, source, target, cell_number=True):
# extract key_infos in adata
key_infos = pd.crosstab(adata.obs[target], adata.obs[source])
# NODES
# transform key_infos into the nodes df
nodes = [['ID', 'Label', 'Color']]
if not cell_number:
label_list = key_infos.columns.tolist() + key_infos.index.tolist()
else:
target_cell_nb = pd.crosstab(adata.obs[target], adata.obs[target], margins=True)
source_cell_nb = pd.crosstab(adata.obs[source], adata.obs[source], margins=True)
source_names = []
for n in range(0, len(key_infos.columns.tolist())):
source_names.append(" n=".join([str(key_infos.columns.tolist()[n]), str(source_cell_nb['All'][n])]))
target_names = []
index = 0
for target_name in key_infos.index.tolist():
# print(target_name, target_cell_nb['All'][index])
target_names.append(" n=".join([target_name, str(target_cell_nb['All'][index])]))
index += 1
label_list = source_names + target_names
# print(label_list)
id_list = list(range(0, len(label_list), 1))
# Pay attention if clusters_colors or 'orig.ident_colors' missing
if source + '_colors' not in adata.uns.keys():
adata.uns[source + '_colors'] = list(_get_palette(adata, source).values())
if target + '_colors' not in adata.uns.keys():
adata.uns[target + '_colors'] = list(_get_palette(adata, target).values())
if type(adata.uns[source + '_colors']) == np.ndarray:
adata.uns[source + '_colors'] = adata.uns[source + '_colors'].tolist()
if type(adata.uns[target + '_colors']) == np.ndarray:
adata.uns[target + '_colors'] = adata.uns[target + '_colors'].tolist()
colors = adata.uns[source + '_colors'] + adata.uns[target + '_colors']
for number in id_list:
tmp_list = [number, label_list[number], colors[number]]
nodes.append(tmp_list)
# LINKS
key_infos_values = key_infos.values.tolist()
key_infos_index = key_infos.index.tolist()
# make the link df
links = [['Source', 'Target', 'Value', 'Link Color']]
index_target = len(label_list) - len(key_infos.index.tolist())
for index_value in key_infos_values:
index_source = 0
for count in index_value:
tmp_list = [index_source, index_target, count, colors[index_source]]
index_source += 1
links.append(tmp_list)
index_target += 1
# Retrieve headers and build dataframes
nodes_headers = nodes.pop(0)
links_headers = links.pop(0)
df_nodes = pd.DataFrame(nodes, columns=nodes_headers)
df_links =
|
pd.DataFrame(links, columns=links_headers)
|
pandas.DataFrame
|
import decimal
import json
import logging
from datetime import datetime
from typing import List
import arrow
import pandas as pd
from steampy.models import GameOptions
from db.db import Listing, Item, init_db
from market_sell.steam_classes import SteamClientPatched, SteamLimited
from . import utilities
TWODIGITS = decimal.Decimal('0.01')
logger = logging.getLogger(__name__)
class Exchange(object):
"""
Class representing an exchange. In this case Steam Market.
"""
def __init__(self, config: dict):
self._last_run = 0
self._config = config
self._initialize_database()
self._prepare_markets()
self._heartbeat_interval = config.get('heartbeat_interval')
self._heartbeat_msg: float = 0
self._timeout: int = self._config.get('market_sell_timeout')
def _prepare_markets(self) -> None:
if self._config.get('use_cookies', False):
try:
self.steam_client = SteamClientPatched.from_pickle(self._config['username'])
logger.info('Successfully logged into Steam through cookies')
except ValueError:
logger.info('Did not manage to log into Steam through cookies')
self._login_and_save_cookies()
else:
self._login_and_save_cookies()
self.steam_market = SteamLimited(self.steam_client.session, self._config['steamguard'],
self.steam_client.session_id, self.steam_client.currency)
def _login_and_save_cookies(self):
self.steam_client: SteamClientPatched = SteamClientPatched(self._config['apikey'])
self.steam_client.login(self._config['username'], self._config['password'],
json.dumps(self._config['steamguard']))
self.steam_client.to_pickle(self._config['username'])
def _initialize_database(self) -> None:
debug = self._config.get('debug', True)
db_url = self._config.get('db_url', True)
if debug:
init_db('sqlite:///sales_debug.sqlite')
else:
init_db(db_url)
def get_own_items(self, game: GameOptions = GameOptions.CS) -> pd.DataFrame:
"""
Gets *marketable* items in inventory for specified game
:param game: defaults CSGO
:return: Dataframe of items in inventory
"""
items_dict = self.steam_client.get_my_inventory(game)
columns = ['appid',
'classid',
'instanceid',
'tradable',
'name',
'name_color',
'type',
'market_hash_name',
'commodity',
'market_tradable_restriction',
'marketable',
'contextid',
'id',
'amount']
items =
|
pd.DataFrame.from_dict(items_dict, orient='index', columns=columns)
|
pandas.DataFrame.from_dict
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 14:51:54 2020
A collection of cleanup functions that should just run.
Just keep them in one place and clean up the file structure a bit.
Expects that the entire pipeline up until now has been completed.
Hopefully this all works because will be hard to debug!!
Things might be out of order so need to check this.
@author: npittman
"""
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import xesmf as xe
import cbsyst as cb
import os
from carbon_math import *
def find_enso_events_redundent(threshold=0.5):
'''
A function to pull ENSO data from our datasets/indexes/meiv2.csv
save events (months) stronger than threshold (0.5 by default)
'processed/indexes/el_nino_events.csv'
'processed/indexes/la_nina_events.csv'
Returns
-------
None.
'''
#enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col='Year')
enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col=0,header=None)
enso_flat=enso.stack()
enso_dates=pd.date_range('1979','2020-07-01',freq='M')- pd.offsets.MonthBegin(1) #Probably want to check this is correct if updating.
enso_timeseries=pd.DataFrame({'Date':enso_dates,'mei':enso_flat})
#Check if we are in or out of an event so far
el_event=False
la_event=False
el_startdate=''
la_startdate=''
elnino=pd.DataFrame()
lanina=pd.DataFrame()
for i,today in enumerate(enso_timeseries.Date):
val=enso_timeseries.mei.iloc[i]
if val>=threshold:
if el_event==False: #And we haven't yet entered an event
el_startdate=today
el_event=True
else:
pass
#Dont need to do anything because it will get caught later
else:
if el_event==True:
elnino=elnino.append({'start':el_startdate.to_datetime64(),
'end':enso_timeseries.Date.iloc[i-1],
'mei':enso_timeseries.mei.iloc[i-1]},ignore_index=True)
el_event=False
for i,today in enumerate(enso_timeseries.Date):
val=enso_timeseries.mei.iloc[i]
if val<=-threshold:
if la_event==False: #And we haven't yet entered an event
la_startdate=today
la_event=True
else:
pass
#Dont need to do anything because it will get caught later
else:
if la_event==True:
lanina=lanina.append({'start':la_startdate.to_datetime64(),
'end':enso_timeseries.Date.iloc[i-1],
'mei':enso_timeseries.mei.iloc[i-1]},ignore_index=True)
la_event=False
print(elnino)
print(lanina)
elnino.to_csv('processed/indexes/el_nino_events.csv')
lanina.to_csv('processed/indexes/la_nina_events.csv')
def find_enso_events_CP(threshold=0.5):
'''
A function to pull ENSO data from our datasets/indexes/meiv2.csv
save events (months) stronger than threshold (0.5 by default)
Modified to include CP, EP, El Nino and La Nina events and are saved to csv.
'processed/indexes/el_nino_events.csv'
'processed/indexes/la_nina_events.csv'
'processed/indexes/ep_el_nino_events.csv'
'processed/indexes/cp_el_nina_events.csv'
Returns
-------
None.
'''
#enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col='Year')
enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col=0,header=None)
enso=enso.iloc[3:] #Just so Both EMI and MEI start in 1981-01-01
enso_flat=enso.stack()
enso_dates=pd.date_range('1982','2020-07-01',freq='M')- pd.offsets.MonthBegin(1) #Probably want to check this is correct if updating.
emi=pd.read_csv('datasets/indexes/SINTEX_EMI.csv')
emi.time=emi.time.astype('datetime64[M]')
emi.index=emi.time
emi=emi.Obs
enso_timeseries=pd.DataFrame({'Date':enso_dates,'mei':enso_flat})
fp='processed/combined_dataset/month_data_exports.nc'
dat=xr.open_mfdataset(fp)
#Check if we are in or out of an event so far
el_event=False
la_event=False
ep_event=False
cp_event=False
cpc_event=False
el_startdate=''
la_startdate=''
ep_startdate=''
cp_startdate=''
cpc_startdate=''
elnino=
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import sys
import pandas as pd
# Categorical features have to be encoded properly if fed to an ML method.
# In Tensorflow you can feed the un-encoded categorical features; for 'normal'
# methods, the encoding has to be done beforehand --> here :)
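# A minimal sketch of the same idea with plain pandas (illustrative only):
#   df = pd.DataFrame({'color': ['red', 'blue', 'red']})
#   pd.get_dummies(df, columns=['color'])   # -> indicator columns color_blue, color_red
# The FeatureEncoder below does this explicitly so that the output columns are fixed by the
# configured categories rather than by whatever values happen to appear in the data.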
class FeatureEncoder:
def __init__(self, options_dataset):
self.options = options_dataset;
self.filename_options_in = self.options.getFeatureSetStr();
self.diag_group_names = self.options.getDiagGroupNames();
return;
def __prepareHauptdiagnose(self, valStr):
return valStr[:3];
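# (Hauptdiagnose values are presumably ICD codes; keeping only the first three characters reduces them
# to the category level, e.g. 'C50.1' -> 'C50'.)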
def __getDiagIndex(self, val):
try:
ind = self.diag_group_names.index(val);
except ValueError:
ind = -1;
return ind;
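# (Diagnoses that are not in the configured diag_group_names list are mapped to the sentinel index -1.)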
def __prepareMDC(self, valStr):
return str(valStr)[0];
def __prepareOE(self, valStr):
return str(int(valStr));
# def __categorizeBinary(self, df, featurename, feature_values):
# new_headers = [];
# new_headers.append(featurename + '_' + str(feature_values[0]));
# print(new_headers)
# df_new = pd.DataFrame(index=df.index, columns=new_headers);
# df_new = df_new.fillna(0);
# for index, row in df.iterrows():
# val = str(row[featurename]);
# assert (val in feature_values), "The current value is not in the list of possible value for this feature"
# col_new = featurename + '_' + str(val);
# if val == feature_values[0]:
# df_new.at[index, col_new] = 1;
# else:
# df_new.at[index, col_new] = 0;
# print('df_new: ' + str(df_new.shape))
# print(list(df_new.columns))
# return df_new;
def __categorizeMulti(self, df, featurename, feature_values):
new_headers = [];
print('group values: ' + str(feature_values))
for g in feature_values:
h_new = featurename + '_' + str(g);
new_headers.append(h_new);
print('len(categorical_headers): ' + str(len(new_headers)))
print(new_headers)
df_new = pd.DataFrame(index=df.index, columns=new_headers);
df_new = df_new.fillna(0);
for index, row in df.iterrows():
val = str(row[featurename]);
assert ( val in feature_values), "The current value is not in the list of possible value for this feature: %s" % str(val)
col_new = featurename + '_' + str(val);
df_new.at[index, col_new] = 1;
return df_new;
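# Example: featurename='MDC' with feature_values=['A', 'B'] turns a column ['A', 'B', 'A'] into the
# indicator columns MDC_A=[1, 0, 1] and MDC_B=[0, 1, 0].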
def __encodeCategoricalFeatures(self, df):
categorical_features = self.options.getCategoricalFeatures();
column_names = list(df.columns);
for feat in sorted(categorical_features):
if feat in column_names:
# if feat == self.options.getNameMainDiag():
# continue;
print('encode feature: ' + str(feat));
group_values = self.options.getFeatureCategories(feat);
df_new = self.__categorizeMulti(df, feat, group_values);
df = pd.concat([df, df_new], axis=1);
df = df.drop(feat, axis=1);
print('df: ' + str(df.shape))
return df;
def __encodeBinarizableFeature(self, df, name):
print('binarize feature: ' + str(name));
columns = list(df.columns);
if name in columns:
if name == 'Verweildauer':
df[name] = df[name].apply(self.options.getLOSState);
df_binary = self.__categorizeValues(df, name);
elif name == 'Eintrittsalter':
df[name] = df[name].apply(self.options.getAgeState);
df_binary = self.__categorizeValues(df, name);
else:
print('this feature is not binarizable or the method to binarize it is not yet implemented: ' + str(name));
sys.exit();
df = pd.concat([df, df_binary], axis=1);
df = df.drop(name, axis=1);
print('df.shape: ' + str(df.shape))
return df;
# 'Simplify' categorical features with too many distinct categories --> there is not enough data to 'learn'
# useful representations for every category, hence this simplification (at least for now).
def __preprocessFeatureEncoding(self, df):
headers_data = list(df.columns);
num_headers_data = len(headers_data);
print('num headers: ' + str(num_headers_data))
print(headers_data)
df = df.fillna(0);
for h in headers_data:
if h == 'Hauptdiagnose' or h == 'main_diag':
print('preprocess main diagnosis...')
df[h] = df[h].apply(self.__prepareHauptdiagnose);
# df[h + '_ind'] = df[h].apply(self.__getDiagIndex);
elif h == 'AufnehmOE':
df[h] = df[h].apply(self.__prepareOE);
elif h == 'EntlassOE':
df[h] = df[h].apply(self.__prepareOE);
elif h == 'DRGCode':
df[h] = df['DRGCode'].apply(self.__prepareMDC);
return df;
def encodeFeatures(self):
encoding = self.options.getEncodingScheme();
assert encoding is not None, 'an encoding algorithm has to be selected..exit';
assert encoding in ['categorical', 'binary', 'embedding','encoding'], 'feature encoding scheme is not known...please select one of the following: categorical, binary, embedding';
dir_data = self.options.getDirData();
data_prefix = self.options.getDataPrefix();
dataset = self.options.getDatasetName();
name_dem_features = self.options.getFilenameOptionDemographicFeatures();
print('encode features: ' + str(encoding))
strFilename_in = dataset + '_' + name_dem_features + '_' + self.filename_options_in;
strFilename_out = strFilename_in + '_' + encoding;
filename_data_in = os.path.join(dir_data, data_prefix + '_' + strFilename_in + '.csv');
filename_data_out = os.path.join(dir_data, data_prefix + '_' + strFilename_out + '.csv');
df =
|
pd.read_csv(filename_data_in)
|
pandas.read_csv
|
import json
import pickle
from datetime import date, datetime
from typing import Any
from unittest import TestCase
import numpy as np
import pandas as pd
import pyarrow as pa
from pytest import raises
from slide.exceptions import (
SlideCastError,
SlideIndexIncompatibleError,
SlideInvalidOperation,
)
from slide.utils import SlideUtils
from slide_test.utils import assert_duck_eq, assert_pdf_eq, make_rand_df
from triad import Schema
from triad.utils.pyarrow import expression_to_schema, TRIAD_DEFAULT_TIMESTAMP
class SlideTestSuite(object):
"""Pandas-like utils test suite.
Any new :class:`~slide.utils.SlideUtils` should pass this test suite.
"""
class Tests(TestCase):
@classmethod
def setUpClass(cls):
# register_default_sql_engine(lambda engine: engine.sql_engine)
cls._utils = cls.make_utils(cls)
pass
def make_utils(self) -> SlideUtils:
raise NotImplementedError
@property
def utils(self) -> SlideUtils:
return self._utils # type: ignore
@classmethod
def tearDownClass(cls):
# cls._engine.stop()
pass
def to_pd(self, data: Any) -> pd.DataFrame:
raise NotImplementedError
def to_df(
self,
data: Any,
columns: Any = None,
coerce: bool = True,
):
raise NotImplementedError
def test_to_safe_pa_type(self):
assert pa.string() == self.utils.to_safe_pa_type(np.dtype(str))
assert pa.string() == self.utils.to_safe_pa_type(np.dtype(object))
assert TRIAD_DEFAULT_TIMESTAMP == self.utils.to_safe_pa_type(
np.dtype("datetime64[ns]")
)
if pd.__version__ >= "1.2":
assert pa.float64() == self.utils.to_safe_pa_type(pd.Float64Dtype())
assert pa.float32() == self.utils.to_safe_pa_type(pd.Float32Dtype())
assert pa.string() == self.utils.to_safe_pa_type(str)
assert pa.string() == self.utils.to_safe_pa_type("string")
assert pa.date32() == self.utils.to_safe_pa_type(date)
assert TRIAD_DEFAULT_TIMESTAMP == self.utils.to_safe_pa_type(datetime)
def test_is_series(self):
df = self.to_df([["a", 1]], "a:str,b:long")
assert self.utils.is_series(df["a"])
assert not self.utils.is_series(None)
assert not self.utils.is_series(1)
assert not self.utils.is_series("abc")
def test_to_series(self):
s1 = self.utils.to_series(pd.Series([0, 1], name="x"))
s2 = self.utils.to_series(pd.Series([2, 3], name="x"), "y")
s3 = self.utils.to_series([4, 5], "z")
s4 = self.utils.to_series(self.utils.to_series(s2), "w")
assert self.utils.is_series(s1)
assert self.utils.is_series(s2)
assert self.utils.is_series(s3)
assert self.utils.is_series(s4)
df = self.utils.cols_to_df([s1, s2, s3, s4])
assert_pdf_eq(
self.to_pd(df),
pd.DataFrame(dict(x=[0, 1], y=[2, 3], z=[4, 5], w=[2, 3])),
)
def test_to_constant_series(self):
s = self.utils.to_series(pd.Series([0, 1], name="x"))
s1 = self.utils.to_constant_series("a", s, name="y")
s2 = self.utils.to_constant_series(None, s, name="z", dtype="float64")
df = self.utils.cols_to_df([s, s1, s2])
assert_pdf_eq(
self.to_pd(df),
pd.DataFrame(dict(x=[0, 1], y=["a", "a"], z=[None, None])),
)
def test_get_col_pa_type(self):
df = self.to_df(
[["a", 1, 1.1, True, datetime.now()]],
"a:str,b:long,c:double,d:bool,e:datetime",
)
assert pa.types.is_string(self.utils.get_col_pa_type(df["a"]))
assert pa.types.is_string(self.utils.get_col_pa_type("a"))
assert pa.types.is_int64(self.utils.get_col_pa_type(df["b"]))
assert pa.types.is_integer(self.utils.get_col_pa_type(123))
assert pa.types.is_float64(self.utils.get_col_pa_type(df["c"]))
assert pa.types.is_floating(self.utils.get_col_pa_type(1.1))
assert pa.types.is_boolean(self.utils.get_col_pa_type(df["d"]))
assert pa.types.is_boolean(self.utils.get_col_pa_type(False))
assert pa.types.is_timestamp(self.utils.get_col_pa_type(df["e"]))
assert pa.types.is_timestamp(self.utils.get_col_pa_type(datetime.now()))
def test_unary_arithmetic_op(self):
pdf = pd.DataFrame([[2.0], [0.0], [None], [-3.0]], columns=["a"])
df = self.to_df(pdf)
df["a"] = self.utils.unary_arithmetic_op(df["a"], "+")
assert_pdf_eq(self.to_pd(df), pdf)
df["a"] = self.utils.unary_arithmetic_op(df["a"], "-")
pdf = pd.DataFrame([[-2.0], [0.0], [None], [3.0]], columns=["a"])
assert_pdf_eq(self.to_pd(df), pdf)
df["a"] = self.utils.unary_arithmetic_op(-10.1, "-")
pdf = pd.DataFrame([[10.1], [10.1], [10.1], [10.1]], columns=["a"])
assert_pdf_eq(self.to_pd(df), pdf)
raises(
NotImplementedError,
lambda: self.utils.unary_arithmetic_op(df["a"], "]"),
)
def test_binary_arithmetic_op(self):
def test_(pdf: pd.DataFrame, op: str):
df = self.to_df(pdf)
df["d"] = self.utils.binary_arithmetic_op(df.a, df.b, op)
df["e"] = self.utils.binary_arithmetic_op(df.a, 1.0, op)
df["f"] = self.utils.binary_arithmetic_op(1.0, df.b, op)
df["g"] = self.utils.binary_arithmetic_op(1.0, 2.0, op)
df["h"] = self.utils.binary_arithmetic_op(1.0, df.c, op)
df["i"] = self.utils.binary_arithmetic_op(df.a, df.c, op)
assert_duck_eq(
self.to_pd(df[list("defghi")]),
f"""
SELECT
a{op}b AS d, a{op}1.0 AS e, 1.0{op}b AS f,
1.0{op}2.0 AS g, 1.0{op}c AS h, a{op}c AS i
FROM pdf
""",
pdf=pdf,
check_order=False,
)
pdf = pd.DataFrame(
dict(
a=[1.0, 2.0, 3.0, 4.0],
b=[2.0, 2.0, 0.1, 2.0],
c=[1.0, None, 1.0, float("nan")],
)
)
test_(pdf, "+")
test_(pdf, "-")
test_(pdf, "*")
test_(pdf, "/")
# Integer division and dividing by 0 do not have consistent behaviors
# on different SQL engines. So we can't unify.
# SELECT 1.0/0.0 AS x, 1/2 AS y
def test_comparison_op_num(self):
def test_(pdf: pd.DataFrame, op: str):
df = self.to_df(pdf)
df["d"] = self.utils.comparison_op(df.a, df.b, op)
df["e"] = self.utils.comparison_op(df.a, 2.0, op)
df["f"] = self.utils.comparison_op(2.0, df.b, op)
df["g"] = self.utils.comparison_op(2.0, 3.0, op)
df["h"] = self.utils.comparison_op(2.0, df.c, op)
df["i"] = self.utils.comparison_op(df.a, df.c, op)
df["j"] = self.utils.comparison_op(df.c, df.c, op)
assert_duck_eq(
self.to_pd(df[list("defghij")]),
f"""
SELECT
a{op}b AS d, a{op}2.0 AS e, 2.0{op}b AS f,
2.0{op}3.0 AS g, 2.0{op}c AS h, a{op}c AS i,
c{op}c AS j
FROM pdf
""",
pdf=pdf,
check_order=False,
)
assert self.utils.comparison_op(None, None, op) is None
pdf = pd.DataFrame(
dict(
a=[1.0, 2.0, 3.0, 4.0],
b=[2.0, 2.0, 0.1, 2.0],
c=[2.0, None, 2.0, float("nan")],
)
)
test_(pdf, "<")
test_(pdf, "<=")
test_(pdf, "==")
test_(pdf, "!=")
test_(pdf, ">")
test_(pdf, ">=")
def test_comparison_op_str(self):
def test_(pdf: pd.DataFrame, op: str):
df = self.to_df(pdf)
df["d"] = self.utils.comparison_op(df.a, df.b, op)
df["e"] = self.utils.comparison_op(df.a, "y", op)
df["f"] = self.utils.comparison_op("y", df.b, op)
df["g"] = self.utils.comparison_op("y", "z", op)
df["h"] = self.utils.comparison_op("y", df.c, op)
df["i"] = self.utils.comparison_op(df.a, df.c, op)
df["j"] = self.utils.comparison_op(df.c, df.c, op)
assert_duck_eq(
self.to_pd(df[list("defghij")]),
f"""
SELECT
a{op}b AS d, a{op}'y' AS e, 'y'{op}b AS f,
'y'{op}'z' AS g, 'y'{op}c AS h, a{op}c AS i,
c{op}c AS j
FROM pdf
""",
pdf=pdf,
check_order=False,
)
pdf = pd.DataFrame(
dict(
a=["xx", None, "x"],
b=[None, "t", "tt"],
c=["zz", None, "z"],
)
)
test_(pdf, "<")
test_(pdf, "<=")
test_(pdf, "==")
test_(pdf, "!=")
test_(pdf, ">")
test_(pdf, ">=")
def test_comparison_op_time(self):
t = datetime(2019, 1, 1)
x = datetime(2020, 1, 1)
y = datetime(2020, 1, 2)
z = datetime(2020, 1, 3)
def test_(pdf: pd.DataFrame, op: str):
df = self.to_df(pdf)
df["d"] = self.utils.comparison_op(df.a, df.b, op)
df["e"] = self.utils.comparison_op(df.a, y, op)
df["f"] = self.utils.comparison_op(y, df.b, op)
df["g"] = self.utils.comparison_op(y, z, op)
df["h"] = self.utils.comparison_op(y, df.c, op)
df["i"] = self.utils.comparison_op(df.a, df.c, op)
df["j"] = self.utils.comparison_op(df.c, df.c, op)
assert_duck_eq(
self.to_pd(df[list("defghij")]),
f"""
SELECT
a{op}b AS d, a{op}'{y}' AS e, '{y}'{op}b AS f,
'{y}'{op}'{z}' AS g, '{y}'{op}c AS h, a{op}c AS i,
c{op}c AS j
FROM pdf
""",
pdf=pdf,
check_order=False,
)
pdf = pd.DataFrame(
dict(
a=[x, None, x],
b=[None, t, t],
c=[z, z, None],
)
)
test_(pdf, "<")
test_(pdf, "<=")
test_(pdf, "==")
test_(pdf, "!=")
test_(pdf, ">")
test_(pdf, ">=")
def test_binary_logical_op(self):
def test_(pdf: pd.DataFrame, op: str):
df = self.to_df(pdf)
df["d"] = self.utils.binary_logical_op(df.a, df.b, op)
df["e"] = self.utils.binary_logical_op(df.a, True, op)
df["f"] = self.utils.binary_logical_op(True, df.b, op)
df["g"] = self.utils.binary_logical_op(df.a, False, op)
df["h"] = self.utils.binary_logical_op(False, df.b, op)
df["i"] = self.utils.binary_logical_op(True, False, op)
df["j"] = self.utils.binary_logical_op(True, None, op)
df["k"] = self.utils.binary_logical_op(False, None, op)
df["l"] = self.utils.binary_logical_op(None, None, op)
assert_duck_eq(
self.to_pd(df[list("defghijkl")]),
f"""
SELECT
a {op} b AS d, a {op} TRUE AS e, TRUE {op} b AS f,
a {op} FALSE AS g, FALSE {op} b AS h, TRUE {op} FALSE AS i,
TRUE {op} NULL AS j, FALSE {op} NULL AS k, NULL {op} NULL AS l
FROM pdf
""",
pdf=pdf,
check_order=False,
)
pdf = pd.DataFrame(
dict(
a=[True, False, True, False, True, False, None],
b=[False, True, True, False, None, None, None],
)
)
test_(pdf, "and")
test_(pdf, "or")
def test_logical_not(self):
def test_(pdf: pd.DataFrame):
df = self.to_df(pdf)
df["c"] = self.utils.logical_not(df.a)
df["e"] = self.utils.logical_not(True)
df["f"] = self.utils.logical_not(False)
df["g"] = self.utils.logical_not(None)
assert_duck_eq(
self.to_pd(df[list("cefg")]),
"""
SELECT
NOT a AS c, NOT TRUE AS e,
NOT FALSE AS f, NOT NULL AS g
FROM pdf
""",
pdf=pdf,
check_order=False,
)
pdf = pd.DataFrame(dict(a=[True, False, None]))
test_(pdf)
def test_filter_df(self):
def test_(pdf: pd.DataFrame):
df = self.to_df(pdf)
assert_duck_eq(
self.to_pd(self.utils.filter_df(df, df["a"])),
"""
SELECT * FROM pdf WHERE a
""",
pdf=pdf,
check_order=False,
)
test_(pd.DataFrame(dict(a=[True, False], b=[1.0, 2.0])))
test_(pd.DataFrame(dict(a=[False, False], b=[1.0, 2.0])))
test_(pd.DataFrame(dict(a=[1.0, 0.0, None], b=[1.0, 2.0, 3.0])))
test_(pd.DataFrame(dict(a=[float("nan"), 0.0, None], b=[1.0, 2.0, 3.0])))
pdf = pd.DataFrame([[1], [2]], columns=["a"])
df = self.to_df(pdf)
assert_duck_eq(
self.to_pd(self.utils.filter_df(df, True)),
"""
SELECT * FROM pdf WHERE TRUE
""",
pdf=pdf,
check_order=False,
)
assert_duck_eq(
self.to_pd(self.utils.filter_df(df, False)),
"""
SELECT * FROM pdf WHERE FALSE
""",
pdf=pdf,
check_order=False,
)
def test_is_value(self):
assert self.utils.is_value(None, None, True)
assert not self.utils.is_value(None, None, False)
assert not self.utils.is_value(None, True, True)
assert self.utils.is_value(None, True, False)
assert not self.utils.is_value(None, False, True)
assert self.utils.is_value(None, False, False)
assert self.utils.is_value(float("nan"), None, True)
assert not self.utils.is_value(float("nan"), None, False)
assert self.utils.is_value(pd.NaT, None, True)
assert not self.utils.is_value(pd.NaT, None, False)
assert not self.utils.is_value("abc", None, True)
assert self.utils.is_value("abc", None, False)
assert not self.utils.is_value(True, None, True)
assert self.utils.is_value(True, None, False)
assert self.utils.is_value(True, True, True)
assert not self.utils.is_value(True, True, False)
assert not self.utils.is_value(True, False, True)
assert self.utils.is_value(True, False, False)
assert not self.utils.is_value(-1.1, None, True)
assert self.utils.is_value(-1.1, None, False)
assert self.utils.is_value(-1.1, True, True)
assert not self.utils.is_value(-1.1, True, False)
assert not self.utils.is_value(-1.1, False, True)
assert self.utils.is_value(-1.1, False, False)
assert not self.utils.is_value(False, None, True)
assert self.utils.is_value(False, None, False)
assert not self.utils.is_value(False, True, True)
assert self.utils.is_value(False, True, False)
assert self.utils.is_value(False, False, True)
assert not self.utils.is_value(False, False, False)
assert not self.utils.is_value(0, None, True)
assert self.utils.is_value(0, None, False)
assert not self.utils.is_value(0, True, True)
assert self.utils.is_value(0, True, False)
assert self.utils.is_value(0, False, True)
assert not self.utils.is_value(0, False, False)
with raises(NotImplementedError):
self.utils.is_value(0, "x", False)
pdf = pd.DataFrame(dict(a=[True, False, None]))
df = self.to_df(pdf)
df["h"] = self.utils.is_value(df["a"], None, True)
df["i"] = self.utils.is_value(df["a"], None, False)
df["j"] = self.utils.is_value(df["a"], True, True)
df["k"] = self.utils.is_value(df["a"], True, False)
df["l"] = self.utils.is_value(df["a"], False, True)
df["m"] = self.utils.is_value(df["a"], False, False)
assert_pdf_eq(
self.to_pd(df[list("hijklm")]),
pd.DataFrame(
dict(
h=[False, False, True],
i=[True, True, False],
j=[True, False, False],
k=[False, True, True],
l=[False, True, False],
m=[True, False, True],
),
),
check_order=False,
)
def test_is_in(self):
assert self.utils.is_in(None, [None, 1], True) is None
assert self.utils.is_in(None, [None, 1], False) is None
assert self.utils.is_in(None, ["a", "b"], True) is None
assert self.utils.is_in(None, ["a", "b"], False) is None
assert self.utils.is_in(True, [False, True], True)
assert not self.utils.is_in(True, [False, True], False)
assert self.utils.is_in(False, [None, False], True)
assert not self.utils.is_in(False, [None, False], False)
assert self.utils.is_in(True, [None, False], True) is None
assert self.utils.is_in(True, [None, False], False) is None
assert self.utils.is_in(1, [2, 1], True)
assert not self.utils.is_in(1, [2, 1], False)
assert self.utils.is_in(1, [None, 1], True)
assert not self.utils.is_in(1, [None, 1], False)
assert self.utils.is_in(1, [None, 2], True) is None
assert self.utils.is_in(1, [None, 2], False) is None
assert self.utils.is_in(1.1, [2.2, 1.1], True)
assert not self.utils.is_in(1.1, [2.2, 1.1], False)
assert self.utils.is_in(1.1, [None, 1.1], True)
assert not self.utils.is_in(1.1, [None, 1.1], False)
assert self.utils.is_in(1.1, [None, 2.2], True) is None
assert self.utils.is_in(1.1, [None, 2.2], False) is None
assert self.utils.is_in("aa", ["bb", "aa"], True)
assert not self.utils.is_in("aa", ["bb", "aa"], False)
assert self.utils.is_in("aa", [None, "aa"], True)
assert not self.utils.is_in("aa", [None, "aa"], False)
assert self.utils.is_in("aa", [None, "bb"], True) is None
assert self.utils.is_in("aa", [None, "b"], False) is None
assert self.utils.is_in(
date(2020, 1, 1), [date(2020, 1, 2), date(2020, 1, 1)], True
)
assert not self.utils.is_in(
date(2020, 1, 1), [date(2020, 1, 2), date(2020, 1, 1)], False
)
assert self.utils.is_in(date(2020, 1, 1), [pd.NaT, date(2020, 1, 1)], True)
assert not self.utils.is_in(
date(2020, 1, 1), [None, date(2020, 1, 1)], False
)
assert (
self.utils.is_in(date(2020, 1, 1), [pd.NaT, date(2020, 1, 2)], True)
is None
)
assert (
self.utils.is_in(date(2020, 1, 1), [None, date(2020, 1, 2)], False)
is None
)
def test_is_in_sql(self):
pdf = pd.DataFrame(
dict(
a=[True, False, None],
b=[1, 2, None],
c=[1.1, 2.2, None],
d=["aa", "bb", None],
e=[date(2020, 1, 1), date(2020, 1, 2), None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.is_in(df["a"], [False, None], True)
df["i"] = self.utils.is_in(df["a"], [False, None], False)
df["j"] = self.utils.is_in(df["b"], [1, 3, None], True)
df["k"] = self.utils.is_in(df["b"], [1, 3, None], False)
df["l"] = self.utils.is_in(df["c"], [1.1, 3.3, None], True)
df["m"] = self.utils.is_in(df["c"], [1.1, 3.3, None], False)
df["n"] = self.utils.is_in(df["d"], ["aa", "cc", None], True)
df["o"] = self.utils.is_in(df["d"], ["aa", "cc", None], False)
df["p"] = self.utils.is_in(
df["e"], [date(2020, 1, 1), date(2020, 1, 3), None], True
)
df["q"] = self.utils.is_in(
df["e"], [date(2020, 1, 1), date(2020, 1, 3), None], False
)
assert_duck_eq(
self.to_pd(df[list("jklmnopq")]),
"""
SELECT
-- a IN (FALSE, NULL) AS h,
-- a NOT IN (FALSE, NULL) AS i,
b IN (3, 1, NULL) AS j,
b NOT IN (3, 1, NULL) AS k,
c IN (3.3, 1.1, NULL) AS l,
c NOT IN (3.3, 1.1, NULL) AS m,
d IN ('cc', 'aa', NULL) AS n,
d NOT IN ('cc', 'aa', NULL) AS o,
e IN ('2020-01-03', '2020-01-01', NULL) AS p,
e NOT IN ('2020-01-03', '2020-01-01', NULL) AS q
FROM a
""",
a=pdf,
check_order=False,
)
pdf = pd.DataFrame(
dict(
a=[1.1, 2.2, None],
b=[1.1, None, None],
c=[None, 2.2, None],
d=[3.3, None, None],
e=[None, 4.4, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.is_in(df["a"], [df["b"], df["c"]], True)
df["i"] = self.utils.is_in(df["a"], [df["b"], df["c"]], False)
df["j"] = self.utils.is_in(df["a"], [df["d"], df["e"]], True)
df["k"] = self.utils.is_in(df["a"], [df["d"], df["e"]], False)
df["l"] = self.utils.is_in(df["a"], [df["b"], df["d"], None], True)
df["m"] = self.utils.is_in(df["a"], [df["b"], df["d"], None], False)
assert_duck_eq(
self.to_pd(df[list("hijklm")]),
"""
SELECT
a IN (b, c) AS h,
a NOT IN (b, c) AS i,
a IN (d, e) AS j,
a NOT IN (d, e) AS k,
a IN (b, d, NULL) AS l,
a NOT IN (b, d, NULL) AS m
FROM a
""",
a=pdf,
check_order=False,
)
def test_is_between(self):
# if col is null, then the result is null
for a in [1, 2, None]:
for b in [1, 2, None]:
for p in [True, False]:
assert self.utils.is_between(None, a, b, p) is None
# one side is none and the result can't be determined, so null
assert self.utils.is_between(2, None, 2, True) is None
assert self.utils.is_between(2, None, 2, False) is None
assert self.utils.is_between(3, 2, None, True) is None
assert self.utils.is_between(3, 2, None, False) is None
# one side is none but the result is still deterministic
assert not self.utils.is_between(3, None, 2, True)
assert self.utils.is_between(3, None, 2, False)
assert not self.utils.is_between(1, 2, None, True)
assert self.utils.is_between(1, 2, None, False)
# if lower and upper are both nulls, the result is null
assert self.utils.is_between(3, None, None, True) is None
assert self.utils.is_between(3, None, None, False) is None
# happy paths
assert self.utils.is_between(1, 1, 2, True)
assert not self.utils.is_between(2, 1, 2, False)
assert not self.utils.is_between(0, 1, 2, True)
assert self.utils.is_between(0, 1, 2, False)
assert not self.utils.is_between(3, 1, 2, True)
assert self.utils.is_between(3, 1, 2, False)
assert self.utils.is_between("bb", "bb", "cc", True)
assert not self.utils.is_between("cc", "bb", "cc", False)
assert not self.utils.is_between("aa", "bb", "cc", True)
assert self.utils.is_between("aa", "bb", "cc", False)
assert self.utils.is_between(
date(2020, 1, 2), date(2020, 1, 2), date(2020, 1, 3), True
)
assert not self.utils.is_between(
date(2020, 1, 3), date(2020, 1, 2), date(2020, 1, 3), False
)
assert not self.utils.is_between(
date(2020, 1, 1), date(2020, 1, 2), date(2020, 1, 3), True
)
assert self.utils.is_between(
date(2020, 1, 1), date(2020, 1, 2), date(2020, 1, 3), False
)
def test_is_between_sql(self):
pdf = make_rand_df(100, a=(float, 20), b=(float, 20), c=(float, 20))
# pdf = make_rand_df(5, a=(float, 2), b=(float, 2), c=(float, 2))
print(pdf)
df = self.to_df(pdf)
df["h"] = self.utils.is_between(df["a"], df["b"], df["c"], True)
df["i"] = self.utils.is_between(df["a"], df["b"], df["c"], False)
df["j"] = self.utils.is_between(None, df["b"], df["c"], True)
df["k"] = self.utils.is_between(None, df["b"], df["c"], False)
df["l"] = self.utils.is_between(df["a"], df["b"], None, True)
df["m"] = self.utils.is_between(df["a"], df["b"], None, False)
df["n"] = self.utils.is_between(df["a"], None, df["c"], True)
df["o"] = self.utils.is_between(df["a"], None, df["c"], False)
df["p"] = self.utils.is_between(df["a"], 0.5, df["c"], True)
df["q"] = self.utils.is_between(df["a"], 0.5, df["c"], False)
df["r"] = self.utils.is_between(df["a"], df["b"], 0.5, True)
df["s"] = self.utils.is_between(df["a"], df["b"], 0.5, False)
assert_duck_eq(
self.to_pd(df[list("hijklmnopqrs")]),
"""
SELECT
a BETWEEN b AND c AS h,
a NOT BETWEEN b AND c AS i,
NULL BETWEEN b AND c AS j,
NULL NOT BETWEEN b AND c AS k,
a BETWEEN b AND NULL AS l,
a NOT BETWEEN b AND NULL AS m,
a BETWEEN NULL AND c AS n,
a NOT BETWEEN NULL AND c AS o,
a BETWEEN 0.5 AND c AS p,
a NOT BETWEEN 0.5 AND c AS q,
a BETWEEN b AND 0.5 AS r,
a NOT BETWEEN b AND 0.5 AS s
FROM a
""",
a=pdf,
check_order=False,
)
def test_cast_coalesce_sql(self):
pdf = make_rand_df(100, a=(float, 50), b=(float, 50), c=(float, 50))
df = self.to_df(pdf)
df["g"] = self.utils.coalesce([None])
df["h"] = self.utils.coalesce([None, 10.1, None])
df["i"] = self.utils.coalesce([df["a"], 10.1])
df["j"] = self.utils.coalesce([10.1, df["a"]])
df["k"] = self.utils.coalesce([df["a"], None])
df["l"] = self.utils.coalesce([None, df["a"]])
df["m"] = self.utils.coalesce([df["a"], df["b"], df["c"]])
df["n"] = self.utils.coalesce([df["a"], df["b"], df["c"], 10.1])
assert_duck_eq(
self.to_pd(df[list("ghijklmn")]),
"""
SELECT
COALESCE(NULL) AS g,
COALESCE(NULL, 10.1, NULL) AS h,
COALESCE(a, 10.1) AS i,
COALESCE(10.1, a) AS j,
COALESCE(a, NULL) AS k,
COALESCE(NULL, a) AS l,
COALESCE(a,b,c) AS m,
COALESCE(a,b,c,10.1) AS n
FROM a
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(100, a=(bool, 50), b=(bool, 50), c=(bool, 50))
df = self.to_df(pdf)
df["h"] = self.utils.coalesce([None, False, None])
df["i"] = self.utils.coalesce([df["a"], False])
df["j"] = self.utils.coalesce([False, df["a"]])
df["k"] = self.utils.coalesce([df["a"], None])
df["l"] = self.utils.coalesce([None, df["a"]])
df["m"] = self.utils.coalesce([df["a"], df["b"], df["c"]])
df["n"] = self.utils.coalesce([df["a"], df["b"], df["c"], False])
assert_duck_eq(
self.to_pd(df[list("hijklmn")]),
"""
SELECT
COALESCE(NULL, FALSE) AS h,
COALESCE(a, FALSE) AS i,
COALESCE(FALSE, a) AS j,
COALESCE(a, NULL) AS k,
COALESCE(NULL, a) AS l,
COALESCE(a,b,c) AS m,
COALESCE(a,b,c,FALSE) AS n
FROM (SELECT
CAST(a AS BOOLEAN) a,
CAST(b AS BOOLEAN) b,
CAST(c AS BOOLEAN) c FROM a)
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(100, a=(int, 50), b=(int, 50), c=(int, 50))
df = self.to_df(pdf)
df["h"] = self.utils.coalesce([None, 10, None])
df["i"] = self.utils.coalesce([df["a"], 10])
df["j"] = self.utils.coalesce([10, df["a"]])
df["k"] = self.utils.coalesce([df["a"], None])
df["l"] = self.utils.coalesce([None, df["a"]])
df["m"] = self.utils.coalesce([df["a"], df["b"], df["c"]])
df["n"] = self.utils.coalesce([df["a"], df["b"], df["c"], 10])
assert_duck_eq(
self.to_pd(df[list("hijklmn")]),
"""
SELECT
COALESCE(NULL, 10) AS h,
COALESCE(a, 10) AS i,
COALESCE(10, a) AS j,
COALESCE(a, NULL) AS k,
COALESCE(NULL, a) AS l,
COALESCE(a,b,c) AS m,
COALESCE(a,b,c,10) AS n
FROM (SELECT
CAST(a AS INTEGER) a,
CAST(b AS INTEGER) b,
CAST(c AS INTEGER) c FROM a)
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(100, a=(str, 50), b=(str, 50), c=(str, 50))
df = self.to_df(pdf)
df["h"] = self.utils.coalesce([None, "xx", None])
df["i"] = self.utils.coalesce([df["a"], "xx"])
df["j"] = self.utils.coalesce(["xx", df["a"]])
df["k"] = self.utils.coalesce([df["a"], None])
df["l"] = self.utils.coalesce([None, df["a"]])
df["m"] = self.utils.coalesce([df["a"], df["b"], df["c"]])
df["n"] = self.utils.coalesce([df["a"], df["b"], df["c"], "xx"])
assert_duck_eq(
self.to_pd(df[list("hijklmn")]),
"""
SELECT
COALESCE(NULL, 'xx') AS h,
COALESCE(a, 'xx') AS i,
COALESCE('xx', a) AS j,
COALESCE(a, NULL) AS k,
COALESCE(NULL, a) AS l,
COALESCE(a,b,c) AS m,
COALESCE(a,b,c,'xx') AS n
FROM a
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(
100, a=(datetime, 50), b=(datetime, 50), c=(datetime, 50)
)
ct = datetime(2020, 1, 1, 15)
df = self.to_df(pdf)
df["h"] = self.utils.coalesce([None, ct, None])
df["i"] = self.utils.coalesce([df["a"], ct])
df["j"] = self.utils.coalesce([ct, df["a"]])
df["k"] = self.utils.coalesce([df["a"], None])
df["l"] = self.utils.coalesce([None, df["a"]])
df["m"] = self.utils.coalesce([df["a"], df["b"], df["c"]])
df["n"] = self.utils.coalesce([df["a"], df["b"], df["c"], ct])
assert_duck_eq(
self.to_pd(df[list("hijklmn")]),
"""
SELECT
COALESCE(NULL, TIMESTAMP '2020-01-01 15:00:00') AS h,
COALESCE(a, TIMESTAMP '2020-01-01 15:00:00') AS i,
COALESCE(TIMESTAMP '2020-01-01 15:00:00', a) AS j,
COALESCE(a, NULL) AS k,
COALESCE(NULL, a) AS l,
COALESCE(a,b,c) AS m,
COALESCE(a,b,c,TIMESTAMP '2020-01-01 15:00:00') AS n
FROM a
""",
a=pdf,
check_order=False,
)
def test_case_when(self):
assert 4 == self.utils.case_when(default=4)
assert 3 == self.utils.case_when((False, 1), (2, 3), default=4)
assert 3 == self.utils.case_when((None, 1), (2, 3), default=4)
assert 1 == self.utils.case_when((True, 1), (2, 3), default=4)
assert 4 == self.utils.case_when((False, 1), (False, 3), default=4)
def test_case_when_sql(self):
pdf = make_rand_df(20, a=bool, b=str, c=bool, d=(str, 10), e=(str, 10))
df = self.to_df(pdf)
df["h"] = self.utils.case_when((df["a"], df["b"]), (df["c"], df["d"]))
df["i"] = self.utils.case_when(
(df["a"], df["b"]), (df["c"], df["d"]), default=df["e"]
)
assert_duck_eq(
self.to_pd(df[list("hi")]),
"""
SELECT
CASE WHEN a THEN b WHEN c THEN d END AS h,
CASE WHEN a THEN b WHEN c THEN d ELSE e END AS i
FROM a
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(
20, a=(bool, 10), b=(str, 10), c=(bool, 10), d=(str, 10), e=(str, 10)
)
df = self.to_df(pdf)
df["h"] = self.utils.case_when((df["a"], df["b"]), (df["c"], df["d"]))
df["i"] = self.utils.case_when(
(df["a"], df["b"]), (df["c"], df["d"]), default=df["e"]
)
assert_duck_eq(
self.to_pd(df[list("hi")]),
"""
SELECT
CASE WHEN a THEN b WHEN c THEN d END AS h,
CASE WHEN a THEN b WHEN c THEN d ELSE e END AS i
FROM a
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(
20,
a=(float, 10),
b=(float, 10),
c=(float, 10),
d=(float, 10),
e=(float, 10),
)
df = self.to_df(pdf)
df["h"] = self.utils.case_when(
(df["a"] > 0.5, df["b"]), ((df["c"] > 0.5) | (df["a"] > 0.3), df["d"])
)
df["i"] = self.utils.case_when(
(df["a"] > 0.5, df["b"]),
((df["c"] > 0.5) | (df["a"] > 0.3), df["d"]),
default=df["e"],
)
df["j"] = self.utils.case_when(
(df["a"] > 0.5, df["b"]),
(df["a"] > 0.5, df["d"]),
default=df["e"],
)
df["k"] = self.utils.case_when(
(None, df["b"]),
(df["a"] > 0.5, df["d"]),
default=df["e"],
)
df["l"] = self.utils.case_when(
(True, 2),
(df["a"] > 0.5, df["d"]),
default=df["e"],
)
df["m"] = self.utils.case_when(
(True, None),
(df["a"] > 0.5, df["d"]),
default=df["e"],
)
assert_duck_eq(
self.to_pd(df[list("hijklm")]),
"""
SELECT
CASE
WHEN a>0.5 THEN b
WHEN c>0.5 OR a>0.3 THEN d END AS h,
CASE
WHEN a>0.5 THEN b
WHEN c>0.5 OR a>0.3 THEN d
ELSE e END AS i,
CASE
WHEN a>0.5 THEN b
WHEN a>0.5 THEN d
ELSE e END AS j,
CASE
WHEN NULL THEN b
WHEN a>0.5 THEN d
ELSE e END AS k,
CASE
WHEN TRUE THEN 2
WHEN a>0.5 THEN d
ELSE e END AS l,
CASE
WHEN TRUE THEN NULL
WHEN a>0.5 THEN d
ELSE e END AS m
FROM a
""",
a=pdf,
check_order=False,
)
def test_like(self):
# nulls
for p in [True, False]:
for i in [True, False]:
assert (
self.utils.like(None, None, ignore_case=i, positive=p) is None
)
assert self.utils.like("x", None, ignore_case=i, positive=p) is None
# empty
assert self.utils.like("", "")
assert not self.utils.like("abc", "")
# simple
assert not self.utils.like("abc", "aBc")
assert self.utils.like("abc", "aBc", ignore_case=True)
# start
assert not self.utils.like("abc", "aB%")
assert not self.utils.like("abc", "aB_")
assert self.utils.like("abc", "aB%", ignore_case=True)
assert self.utils.like("abc", "aB_", ignore_case=True)
# end
assert not self.utils.like("abc", "%Bc")
assert not self.utils.like("abc", "_Bc")
assert self.utils.like("abc", "%Bc", ignore_case=True)
assert self.utils.like("abc", "_Bc", ignore_case=True)
# start end
assert not self.utils.like("abc", "A_c")
assert not self.utils.like("abc", "A%c")
assert self.utils.like("abc", "A_c", ignore_case=True)
assert self.utils.like("abc", "A%c", ignore_case=True)
# contain
assert not self.utils.like("abc", "%B%")
assert not self.utils.like("abc", "_B_")
assert self.utils.like("abc", "%B%", ignore_case=True)
assert self.utils.like("abc", "_B_", ignore_case=True)
# not empty
assert self.utils.like("abc", "_%")
assert self.utils.like("abc", "%_")
assert self.utils.like("abc", "%_%")
# any
assert self.utils.like("abc", "%")
def test_like_sql(self):
pdf = pd.DataFrame(
dict(a=["abc", "ABC", "abd", "aBd", "", "ab\\%\\_c", None])
)
df = self.to_df(pdf)
df["h"] = self.utils.like(df["a"], None)
df["i"] = self.utils.like(df["a"], "")
df["j"] = self.utils.like(df["a"], "abc", ignore_case=True)
df["k"] = self.utils.like(df["a"], "aBc", ignore_case=False)
df["l"] = self.utils.like(df["a"], "ab%", ignore_case=True)
df["m"] = self.utils.like(df["a"], "aB%", ignore_case=False)
df["n"] = self.utils.like(df["a"], "%bc", ignore_case=True)
df["o"] = self.utils.like(df["a"], "%bc", ignore_case=False)
df["p"] = self.utils.like(df["a"], "a%c", ignore_case=True)
df["q"] = self.utils.like(df["a"], "a%c", ignore_case=False)
df["r"] = self.utils.like(df["a"], "%bc%", ignore_case=True)
df["s"] = self.utils.like(df["a"], "%bc%", ignore_case=False)
df["t"] = self.utils.like(df["a"], "%_")
df["u"] = self.utils.like(df["a"], "_%")
df["v"] = self.utils.like(df["a"], "%_%")
df["w"] = self.utils.like(df["a"], "_a%", ignore_case=True)
df["x"] = self.utils.like(df["a"], "_a%", ignore_case=False)
df["y"] = self.utils.like(df["a"], "%")
assert_duck_eq(
self.to_pd(df[list("hijklmnopqrstuvwxy")]),
"""
SELECT
a LIKE NULL AS h,
a LIKE '' AS i,
a ILIKE 'abc' AS j,
a LIKE 'aBc' AS k,
a ILIKE 'ab%' AS l,
a LIKE 'aB%' AS m,
a ILIKE '%bc' AS n,
a LIKE '%bc' AS o,
a ILIKE 'a%c' AS p,
a LIKE 'a%c' AS q,
a ILIKE '%bc%' AS r,
a LIKE '%bc%' AS s,
a LIKE '%_' AS t,
a LIKE '_%' AS u,
a LIKE '%_%' AS v,
a ILIKE '_a%' AS w,
a LIKE '_a%' AS x,
a LIKE '%' AS y
FROM a
""",
a=pdf,
check_order=False,
)
df = self.to_df(pdf)
df["h"] = self.utils.like(df["a"], None, positive=False)
df["i"] = self.utils.like(df["a"], "", positive=False)
df["j"] = self.utils.like(df["a"], "abc", ignore_case=True, positive=False)
df["k"] = self.utils.like(df["a"], "aBc", ignore_case=False, positive=False)
df["l"] = self.utils.like(df["a"], "ab%", ignore_case=True, positive=False)
df["m"] = self.utils.like(df["a"], "aB%", ignore_case=False, positive=False)
df["n"] = self.utils.like(df["a"], "%bc", ignore_case=True, positive=False)
df["o"] = self.utils.like(df["a"], "%bc", ignore_case=False, positive=False)
df["p"] = self.utils.like(df["a"], "a%c", ignore_case=True, positive=False)
df["q"] = self.utils.like(df["a"], "a%c", ignore_case=False, positive=False)
df["r"] = self.utils.like(df["a"], "%bc%", ignore_case=True, positive=False)
df["s"] = self.utils.like(
df["a"], "%bc%", ignore_case=False, positive=False
)
df["t"] = self.utils.like(df["a"], "%_", positive=False)
df["u"] = self.utils.like(df["a"], "_%", positive=False)
df["v"] = self.utils.like(df["a"], "%_%", positive=False)
df["w"] = self.utils.like(df["a"], "_a%", ignore_case=True, positive=False)
df["x"] = self.utils.like(df["a"], "_a%", ignore_case=False, positive=False)
df["y"] = self.utils.like(df["a"], "%", positive=False)
assert_duck_eq(
self.to_pd(df[list("hijklmnopqrstuvwxy")]),
"""
SELECT
a NOT LIKE NULL AS h,
a NOT LIKE '' AS i,
a NOT ILIKE 'abc' AS j,
a NOT LIKE 'aBc' AS k,
a NOT ILIKE 'ab%' AS l,
a NOT LIKE 'aB%' AS m,
a NOT ILIKE '%bc' AS n,
a NOT LIKE '%bc' AS o,
a NOT ILIKE 'a%c' AS p,
a NOT LIKE 'a%c' AS q,
a NOT ILIKE '%bc%' AS r,
a NOT LIKE '%bc%' AS s,
a NOT LIKE '%_' AS t,
a NOT LIKE '_%' AS u,
a NOT LIKE '%_%' AS v,
a NOT ILIKE '_a%' AS w,
a NOT LIKE '_a%' AS x,
a NOT LIKE '%' AS y
FROM a
""",
a=pdf,
check_order=False,
)
def test_cast_constant(self):
assert self.utils.cast(None, bool) is None
assert self.utils.cast(True, bool)
assert not self.utils.cast(False, bool)
assert self.utils.cast(float("nan"), bool) is None
assert not self.utils.cast(0, bool)
assert 1 == self.utils.cast(1, bool)
assert 1 == self.utils.cast(-2, bool)
assert 0 == self.utils.cast(0.0, bool)
assert 1 == self.utils.cast(0.1, bool)
assert 1 == self.utils.cast(-0.2, bool)
assert 1 == self.utils.cast(float("inf"), bool)
assert 1 == self.utils.cast(float("-inf"), bool)
assert self.utils.cast("nan", bool) is None
assert 1 == self.utils.cast("tRue", bool)
assert 0 == self.utils.cast("fAlse", bool)
assert self.utils.cast(None, int) is None
assert 1 == self.utils.cast(True, int)
assert 0 == self.utils.cast(False, int)
assert self.utils.cast(float("nan"), int) is None
assert 0 == self.utils.cast(0, int)
assert 10 == self.utils.cast(10, int)
assert 0 == self.utils.cast(0.0, int)
assert 1 == self.utils.cast(1.1, int)
assert -2 == self.utils.cast(-2.2, int)
assert 0 == self.utils.cast("0", int)
assert 10 == self.utils.cast("10", int)
assert 0 == self.utils.cast("0.0", int)
assert 1 == self.utils.cast("1.1", int)
assert -2 == self.utils.cast("-2.2", int)
assert self.utils.cast("nan", int) is None
with raises(SlideCastError):
assert self.utils.cast(float("inf"), int)
with raises(SlideCastError):
assert self.utils.cast(float("-inf"), int)
assert self.utils.cast(None, float) is None
assert 1.0 == self.utils.cast(True, float)
assert 0.0 == self.utils.cast(False, float)
assert self.utils.cast(float("nan"), float) is None
assert 0.0 == self.utils.cast(0, float)
assert 10.0 == self.utils.cast(10, float)
assert 0.0 == self.utils.cast(0.0, float)
assert 1.1 == self.utils.cast(1.1, float)
assert -2.2 == self.utils.cast(-2.2, float)
assert 0.0 == self.utils.cast("0", float)
assert 10.0 == self.utils.cast("10", float)
assert 0.0 == self.utils.cast("0.0", float)
assert 1.1 == self.utils.cast("1.1", float)
assert -2.2 == self.utils.cast("-2.2", float)
assert self.utils.cast("nan", float) is None
assert np.isinf(self.utils.cast(float("inf"), float))
assert np.isinf(self.utils.cast(float("-inf"), float))
assert self.utils.cast(None, str) is None
assert "true" == self.utils.cast(True, str)
assert "false" == self.utils.cast(False, str)
assert "true" == self.utils.cast(-10, str, bool)
assert "false" == self.utils.cast(0, str, bool)
assert "10" == self.utils.cast(10, str)
assert "0" == self.utils.cast(0, str)
assert "10.0" == self.utils.cast(10.0, str)
assert "-10.0" == self.utils.cast(-10.0, str)
assert self.utils.cast(float("nan"), str) is None
assert "inf" == self.utils.cast(float("inf"), str)
assert "-inf" == self.utils.cast(float("-inf"), str)
assert "xy" == self.utils.cast("xy", str)
assert isinstance(self.utils.cast(date(2020, 1, 1), str), str)
assert "2020-01-01" == self.utils.cast(date(2020, 1, 1), str)
assert "2020-01-01 15:00:00" == self.utils.cast(
datetime(2020, 1, 1, 15), str
)
assert self.utils.cast(pd.NaT, str) is None
assert self.utils.cast(None, "date") is None
assert self.utils.cast(None, "datetime") is None
assert self.utils.cast("nat", "date") is None
assert self.utils.cast("nat", "datetime") is None
assert date(2020, 1, 1) == self.utils.cast("2020-01-01", "date")
assert date(2020, 1, 1) == self.utils.cast("2020-01-01 15:00:00", "date")
assert datetime(2020, 1, 1) == self.utils.cast("2020-01-01", "datetime")
assert datetime(2020, 1, 1, 15) == self.utils.cast(
"2020-01-01 15:00:00", "datetime"
)
def test_cast_bool(self):
# happy path
pdf = pd.DataFrame(
dict(
a=[True, False, True],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int)
df["i"] = self.utils.cast(df.a, float)
df["j"] = self.utils.cast(df.a, bool)
df["k"] = self.utils.cast(df.a, str)
assert_pdf_eq(
self.to_pd(df[list("hijk")]),
pd.DataFrame(
dict(
h=[1, 0, 1],
i=[1.0, 0.0, 1.0],
j=[True, False, True],
k=["true", "false", "true"],
),
),
check_order=False,
)
# from bool with None
pdf = pd.DataFrame(
dict(
a=[True, False, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int, bool)
df["i"] = self.utils.cast(df.a, float)
df["j"] = self.utils.cast(df.a, bool, bool)
df["k"] = self.utils.cast(df.a, str, bool)
assert_pdf_eq(
self.to_pd(df[list("hijk")]),
pd.DataFrame(
dict(
h=[1, 0, None],
i=[1.0, 0.0, None],
j=[True, False, None],
k=["true", "false", None],
),
),
check_order=False,
)
# from float with None
pdf = pd.DataFrame(
dict(
a=[2.0, 0.0, -2.0, None, float("nan")],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, bool)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[True, False, True, None, None],
),
),
check_order=False,
)
# from int
pdf = pd.DataFrame(
dict(
a=[2, 0, -2],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, bool)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[True, False, True],
),
),
check_order=False,
)
# from bool with None to various
pdf = pd.DataFrame(
dict(
a=[1.0, 0.0, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int, bool)
df["i"] = self.utils.cast(df.a, float, bool)
df["j"] = self.utils.cast(df.a, bool, bool)
df["k"] = self.utils.cast(df.a, str, bool)
assert_pdf_eq(
self.to_pd(df[list("hijk")]),
pd.DataFrame(
dict(
h=[1, 0, None],
i=[1.0, 0.0, None],
j=[True, False, None],
k=["true", "false", None],
),
),
check_order=False,
)
# from strings
pdf = pd.DataFrame(
dict(
a=["tRue", "fAlse", "true"],
b=["tRue", "fAlse", None],
c=["1", "0", "abc"],
d=["1.0", "0.0", "abc"],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, bool, str)
df["i"] = self.utils.cast(df.b, bool, str)
df["j"] = self.utils.cast(df.c, bool, str)
df["k"] = self.utils.cast(df.d, bool, str)
assert_pdf_eq(
self.to_pd(df[list("hijk")]),
pd.DataFrame(
dict(
h=[True, False, True],
i=[True, False, None],
j=[True, False, None],
k=[True, False, None],
),
),
check_order=False,
)
# invalid
pdf = pd.DataFrame(
dict(
a=[datetime(2020, 1, 1)],
)
)
df = self.to_df(pdf)
with raises(SlideCastError):
df["h"] = self.utils.cast(df.a, bool)
def test_cast_int(self):
# happy path
pdf = pd.DataFrame(
dict(
a=[True, False, True],
b=[2, 3, 4],
c=[1.1, 2.2, 3.3],
d=["1", "2", "3"],
e=["5.5", "6.6", "7.7"],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int)
df["i"] = self.utils.cast(df.b, int)
df["j"] = self.utils.cast(df.c, int)
df["k"] = self.utils.cast(df.d, int)
df["l"] = self.utils.cast(df.e, int)
assert_pdf_eq(
self.to_pd(df[list("hijkl")]),
pd.DataFrame(
dict(
h=[1, 0, 1],
i=[2, 3, 4],
j=[1, 2, 3],
k=[1, 2, 3],
l=[5, 6, 7],
),
),
check_order=False,
)
# from int with None
pdf = pd.DataFrame(
dict(
a=[2, 3, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[2, 3, None],
),
),
check_order=False,
)
# from float with None
pdf = pd.DataFrame(
dict(
a=[2.1, float("nan"), None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[2, None, None],
),
),
check_order=False,
)
# from string with None
pdf = pd.DataFrame(
dict(
a=["2.1", "naN", None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[2, None, None],
),
),
check_order=False,
)
# overflow; TODO: pandas can't raise an exception for this overflow, so the check below stays commented out
pdf = pd.DataFrame(
dict(
a=[10000, -10000],
)
)
# df = self.to_df(pdf)
# with raises(SlideCastError):
# df["h"] = self.utils.cast(df.a, "int8")
# invalid
pdf = pd.DataFrame(
dict(
a=[datetime(2020, 1, 1)],
)
)
df = self.to_df(pdf)
with raises(SlideCastError):
self.utils.series_to_array(self.utils.cast(df.a, int))
def test_cast_int_overflow(self):
pdf = pd.DataFrame(
dict(
a=[2.1, float("inf"), None],
)
)
df = self.to_df(pdf)
with raises(SlideCastError):
self.utils.series_to_array(self.utils.cast(df.a, int))
def test_cast_float(self):
# happy path
pdf = pd.DataFrame(
dict(
a=[True, False, True],
b=[2, 3, 4],
c=[1.1, 2.2, 3.3],
d=[2.0, 0.0, -1.0],
e=["5.5", "6.6", "7.7"],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, float)
df["i"] = self.utils.cast(df.b, float)
df["j"] = self.utils.cast(df.c, float)
df["l"] = self.utils.cast(df.e, float)
assert_pdf_eq(
self.to_pd(df[list("hijl")]),
pd.DataFrame(
dict(
h=[1, 0, 1],
i=[2, 3, 4],
j=[1.1, 2.2, 3.3],
l=[5.5, 6.6, 7.7],
),
),
check_order=False,
)
# from float with None
pdf = pd.DataFrame(
dict(
a=[2.1, float("nan"), float("inf"), None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, "float32")
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[2.1, float("nan"), float("inf"), None],
),
).astype(np.float32),
check_order=False,
)
# from string with None
pdf = pd.DataFrame(
dict(
a=["2.1", "naN", "inf", "-inf", None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, float)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[2.1, None, float("inf"), float("-inf"), None],
),
),
check_order=False,
)
def test_cast_str(self):
# happy path
pdf = pd.DataFrame(
dict(
a=[False, True, True],
b=[2, 3, 4],
c=[1.1, 2.2, 3.3],
d=[
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
],
e=["aa", "ab", "ac"],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str)
df["i"] = self.utils.cast(df.b, str)
df["j"] = self.utils.cast(df.c, str)
df["k"] = self.utils.cast(df.d, str)
df["l"] = self.utils.cast(df.e, str)
assert_pdf_eq(
self.to_pd(df[list("hijkl")]),
pd.DataFrame(
dict(
h=["false", "true", "true"],
i=["2", "3", "4"],
j=["1.1", "2.2", "3.3"],
k=["2020-01-02", "2020-01-03", "2020-01-04"],
l=["aa", "ab", "ac"],
),
),
check_order=False,
)
# from bool with None
pdf = pd.DataFrame(
dict(
a=[True, False, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str, bool)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=["true", "false", None],
),
),
check_order=False,
)
# from float with None
pdf = pd.DataFrame(
dict(
a=[2.1, float("nan"), float("inf"), None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=["2.1", None, "inf", None],
),
),
check_order=False,
)
# from int with None
pdf = pd.DataFrame(
dict(
a=[1, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str, int)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=["1", None],
),
),
check_order=False,
)
# from timestamp with None
pdf = pd.DataFrame(
dict(
a=[
datetime(2020, 1, 1),
datetime(2020, 1, 1, 15, 2, 3),
pd.NaT,
None,
],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=["2020-01-01 00:00:00", "2020-01-01 15:02:03", None, None],
),
),
check_order=False,
)
# from date with None
pdf = pd.DataFrame(
dict(
a=[
date(2020, 1, 1),
date(2020, 1, 2),
pd.NaT,
None,
],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str, "date")
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=["2020-01-01", "2020-01-02", None, None],
),
),
check_order=False,
)
def test_cast_time(self):
# happy path
pdf = pd.DataFrame(
dict(
a=["2020-01-01", "2020-01-02", "2020-01-03"],
b=[
"2020-01-01 01:00:00",
"2020-01-02 14:00:00",
"2020-01-03 15:00:00",
],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, date)
df["i"] = self.utils.cast(df.a, datetime)
df["j"] = self.utils.cast(df.b, "date")
df["k"] = self.utils.cast(df.b, datetime)
assert_pdf_eq(
self.to_pd(df[list("hijk")]),
pd.DataFrame(
dict(
h=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
],
i=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
],
j=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
],
k=[
datetime(2020, 1, 1, 1),
datetime(2020, 1, 2, 14),
datetime(2020, 1, 3, 15),
],
),
),
check_order=False,
)
# str -> date with None
pdf = pd.DataFrame(
dict(
a=["2020-01-01 01:00:00", "2020-01-02 00:00:00", None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, "date")
# assert_pdf_eq(
# self.to_pd(df[list("h")]),
# pd.DataFrame(
# dict(
# h=[datetime(2020, 1, 1), datetime(2020, 1, 2), None],
# ),
# ),
# check_order=False,
# )
# str -> datetime with None
pdf = pd.DataFrame(
dict(
a=["2020-01-01 11:00:00", "2020-01-02 12:00:00", None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, "datetime")
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[datetime(2020, 1, 1, 11), datetime(2020, 1, 2, 12), None],
),
),
check_order=False,
)
def test_cast_df(self):
a = pd.DataFrame(dict(a=[1, 2, None], b=[True, None, False]))
df = self.utils.cast_df(
self.to_df(a.convert_dtypes()), Schema("a:int,b:bool").pa_schema
)
assert pd.Int32Dtype() == df["a"].dtype
assert pd.BooleanDtype() == df["b"].dtype
df = self.utils.cast_df(self.to_df(a), Schema("a:str,b:str").pa_schema)
assert pd.StringDtype() == df["a"].dtype
assert pd.StringDtype() == df["b"].dtype
# with input hint
a = pd.DataFrame(dict(a=[1, 2, None], b=[None, None, None]))
df = self.utils.cast_df(
self.to_df(a),
Schema("a:double,b:int").pa_schema,
Schema("a:int,b:double").pa_schema,
)
assert pd.api.types.is_float_dtype(df["a"].dtype)
assert pd.api.types.is_integer_dtype(df["b"].dtype)
# empty
a = pd.DataFrame(dict(a=[], b=[]))
df = self.utils.cast_df(self.to_df(a), Schema("a:double,b:int").pa_schema)
assert pd.api.types.is_float_dtype(df["a"].dtype)
assert pd.api.types.is_integer_dtype(df["b"].dtype)
# empty + input hint
a = pd.DataFrame(dict(a=[], b=[]))
df = self.utils.cast_df(
self.to_df(a),
Schema("a:double,b:int").pa_schema,
Schema("a:int,b:double").pa_schema,
)
assert pd.api.types.is_float_dtype(df["a"].dtype)
assert pd.api.types.is_integer_dtype(df["b"].dtype)
def test_cols_to_df(self):
df = self.to_df([["a", 1]], "a:str,b:long")
res = self.utils.cols_to_df([df["b"], df["a"]])
assert_pdf_eq(
self.to_pd(res), self.to_pd(self.to_df([[1, "a"]], "b:long,a:str"))
)
res = self.utils.cols_to_df([df["b"], df["a"]], ["x", "y"])
assert_pdf_eq(
self.to_pd(res), self.to_pd(self.to_df([[1, "a"]], "x:long,y:str"))
)
res = self.utils.cols_to_df([123, df["a"]], names=["x", "y"])
assert_pdf_eq(
self.to_pd(res), self.to_pd(self.to_df([[123, "a"]], "x:long,y:str"))
)
with raises(SlideInvalidOperation):
res = self.utils.cols_to_df([123, 456], names=["x", "y"])
def test_to_schema(self):
df = self.to_df([[1.0, 2], [2.0, 3]])
raises(ValueError, lambda: self.utils.to_schema(df))
df = self.to_df([[1.0, 2], [2.1, 3]], columns=["x", "y"])
assert Schema("x:double,y:long") == Schema(self.utils.to_schema(df))
df = self.to_df([["a", 2], ["b", 3]], columns=["x", "y"])
assert Schema("x:str,y:long") == Schema(self.utils.to_schema(df))
df = self.to_df([], columns=["x", "y"])
df = df.astype(dtype={"x": np.int32, "y": np.dtype("object")})
assert [pa.field("x", pa.int32()), pa.field("y", pa.string())] == list(
self.utils.to_schema(df)
)
df = self.to_df([[1, "x"], [2, "y"]], columns=["x", "y"])
df = df.astype(dtype={"x": np.int32, "y": np.dtype("object")})
assert Schema("x:int32,y:str") == Schema(self.utils.to_schema(df))
df = self.to_df([[1, "x"], [2, "y"]], columns=["x", "y"])
df = df.astype(dtype={"x": np.int32, "y": np.dtype(str)})
assert Schema("x:int32,y:str") == Schema(self.utils.to_schema(df))
df = self.to_df([[1, "x"], [2, "y"]], columns=["x", "y"])
df = df.astype(dtype={"x": np.int32, "y": np.dtype("str")})
assert Schema("x:int32,y:str") == Schema(self.utils.to_schema(df))
# timestamp test
df = self.to_df(
[[datetime(2020, 1, 1, 2, 3, 4, 5), datetime(2020, 2, 2)]],
columns=["a", "b"],
)
assert Schema("a:datetime,b:datetime") == Schema(self.utils.to_schema(df))
def test_index_compatible(self):
df = self.to_df([[3.0, 2], [2.1, 3]], columns=["x", "y"])
df = df.nlargest(100, ["x"])
self.utils.ensure_compatible(df)
df["p"] = "p"
df = df.set_index(["p"])
df.index.name = None
raises(
SlideIndexIncompatibleError, lambda: self.utils.ensure_compatible(df)
)
df = df.reset_index(drop=True)
self.utils.ensure_compatible(df)
def test_as_array_iterable(self):
schema = Schema("a:str,b:int").pa_schema
df = self.to_df([], "a:str,b:int")
assert [] == self.utils.as_array(df, schema)
assert [] == self.utils.as_array(df, schema, type_safe=True)
df = self.to_df([["a", 1]], "a:str,b:int")
assert [["a", 1]] == self.utils.as_array(df, schema)
assert [["a", 1]] == self.utils.as_array(df, schema, columns=["a", "b"])
assert [[1, "a"]] == self.utils.as_array(df, schema, columns=["b", "a"])
# prevent pandas auto type casting
schema = Schema("a:double,b:int").pa_schema
df = self.to_df([[1.0, 1.0]], "a:double,b:int")
data = self.utils.as_array(df, schema)
assert [[1.0, 1]] == data
assert isinstance(data[0][0], float)
assert isinstance(data[0][1], int)
assert [[1.0, 1]] == self.utils.as_array(df, schema, columns=["a", "b"])
assert [[1, 1.0]] == self.utils.as_array(df, schema, columns=["b", "a"])
df = self.to_df([[np.float64(1.0), 1.0]], "a:double,b:int")
assert [[1.0, 1]] == self.utils.as_array(df, schema)
assert isinstance(self.utils.as_array(df, schema)[0][0], float)
assert isinstance(self.utils.as_array(df, schema)[0][1], int)
schema = Schema("a:datetime,b:int").pa_schema
df = self.to_df(
[[pd.Timestamp("2020-01-01"), 1.0]],
"a:datetime,b:int",
)
assert [[datetime(2020, 1, 1), 1]] == self.utils.as_array(df, schema)
assert isinstance(
self.utils.as_array(df, schema, type_safe=True)[0][0], datetime
)
assert isinstance(
self.utils.as_array(df, schema, type_safe=True)[0][1], int
)
df = self.to_df([[pd.NaT, 1.0]], "a:datetime,b:int")
assert self.utils.as_array(df, schema, type_safe=True)[0][0] is None
assert isinstance(
self.utils.as_array(df, schema, type_safe=True)[0][1], int
)
schema = Schema("a:double,b:int").pa_schema
df = self.to_df([[1.0, 1.0]], "a:double,b:int")
assert [[1.0, 1]] == self.utils.as_array(df, schema, type_safe=True)
assert isinstance(self.utils.as_array(df, schema)[0][0], float)
assert isinstance(self.utils.as_array(df, schema)[0][1], int)
def test_as_array_iterable_datetime(self):
df = self.to_df(
[[datetime(2020, 1, 1, 2, 3, 4, 5), date(2020, 2, 2)]],
columns="a:datetime,b:date",
)
v1 = list(
self.utils.as_array_iterable(
df, schema=expression_to_schema("a:datetime,b:date"), type_safe=True
)
)[0]
assert not isinstance(v1[0], pd.Timestamp)
assert isinstance(v1[0], datetime)
assert isinstance(v1[1], date)
def test_nested(self):
# data = [[dict(b=[30, "40"])]]
# s = expression_to_schema("a:{a:str,b:[int]}")
# df = self.to_df(data, "a:{a:str,b:[int]}")
# a = df.as_array(type_safe=True)
# assert [[dict(a=None, b=[30, 40])]] == a
data = [[[json.dumps(dict(b=[30, "40"]))]]]
df = self.to_df(data, "a:[{a:str,b:[int]}]", coerce=False)
a = self.utils.as_array(
df, schema=Schema("a:[{a:str,b:[int]}]").pa_schema, type_safe=True
)
assert [[[dict(a=None, b=[30, 40])]]] == a
data = [[json.dumps(["1", 2])]]
df = self.to_df(data, "a:[int]", coerce=False)
a = self.utils.as_array(
df, schema=Schema("a:[int]").pa_schema, type_safe=True
)
assert [[[1, 2]]] == a
def test_binary(self):
b = pickle.dumps("xyz")
data = [[b, b"xy"]]
df = self.to_df(data, "a:bytes,b:bytes")
a = self.utils.as_array(
df, schema=Schema("a:bytes,b:bytes").pa_schema, type_safe=True
)
assert [[b, b"xy"]] == a
def test_nan_none(self):
schema = Schema("b:str,c:double").pa_schema
df = self.to_df([[None, None]], "b:str,c:double")
arr = self.utils.as_array(df, schema, type_safe=True)[0]
assert arr[0] is None
assert arr[1] is None
df = self.to_df([], "b:str,c:double")
assert len(self.utils.as_array(df, schema)) == 0
schema = Schema("b:int,c:bool").pa_schema
df = self.to_df([[None, None]], "b:int,c:bool")
arr = self.utils.as_array(df, schema, type_safe=True)[0]
assert arr[0] is None
assert arr[1] is None
def test_boolean_enforce(self):
schema = Schema("b:int,c:bool").pa_schema
df = self.to_df([[1, True], [2, False], [3, None]], "b:int,c:bool")
arr = self.utils.as_array(df, schema, type_safe=True)
assert [[1, True], [2, False], [3, None]] == arr
df = self.to_df([[1, 1], [2, 0]], "b:int,c:bool")
arr = self.utils.as_array(df, schema, type_safe=True)
assert [[1, True], [2, False]] == arr
df = self.to_df([[1, 1.0], [2, 0.0]], "b:int,c:bool")
arr = self.utils.as_array(df, schema, type_safe=True)
assert [[1, True], [2, False]] == arr
def test_sql_group_by_apply(self):
df = self.to_df([["a", 1], ["a", 2], [None, 3]], "b:str,c:long")
def _m1(df):
self.utils.ensure_compatible(df)
df["ct"] = df.shape[0]
return df
schema = Schema("b:str,c:long,ct:long").pa_schema
res = self.utils.sql_groupby_apply(df, ["b"], _m1, output_schema=schema)
self.utils.ensure_compatible(res)
assert_pdf_eq(
self.to_pd(res),
pd.DataFrame(
[["a", 1, 2], ["a", 2, 2], [None, 3, 1]], columns=["b", "c", "ct"]
),
)
res = self.utils.sql_groupby_apply(df, [], _m1, output_schema=schema)
self.utils.ensure_compatible(res)
assert_pdf_eq(
self.to_pd(res),
pd.DataFrame(
[["a", 1, 3], ["a", 2, 3], [None, 3, 3]], columns=["b", "c", "ct"]
),
)
df = self.to_df(
[[1.0, "a"], [1.0, "b"], [None, "c"], [None, "d"]],
"b:double,c:str",
)
schema = Schema("b:double,c:str,ct:long").pa_schema
res = self.utils.sql_groupby_apply(df, ["b"], _m1, output_schema=schema)
assert_pdf_eq(
self.to_pd(res),
pd.DataFrame(
[
[1.0, "a", 2],
[1.0, "b", 2],
[None, "c", 2],
[None, "d", 2],
],
columns=["b", "c", "ct"],
),
)
def test_sql_group_by_apply_special_types(self):
def _m1(df):
self.utils.ensure_compatible(df)
# df["ct"] = df.shape[0]
return df.assign(ct=df.shape[0])
schema = Schema("a:str,b:double,ct:long").pa_schema
df = self.to_df(
[["a", 1.0], [None, 3.0], [None, 3.0], [None, None]],
"a:str,b:double",
)
res = self.utils.sql_groupby_apply(
df, ["a", "b"], _m1, output_schema=schema
)
self.utils.ensure_compatible(res)
assert_pdf_eq(
self.to_pd(
self.to_df(
[
["a", 1.0, 1],
[None, 3.0, 2],
[None, 3.0, 2],
[None, None, 1],
],
"a:str,b:double,ct:long",
)
),
self.to_pd(res),
)
schema = Schema("a:str,b:datetime,ct:long").pa_schema
dt = datetime.now()
df = self.to_df(
[["a", dt], [None, dt], [None, dt], [None, None]],
"a:str,b:datetime",
)
res = self.utils.sql_groupby_apply(
df, ["a", "b"], _m1, output_schema=schema
)
self.utils.ensure_compatible(res)
assert_pdf_eq(
self.to_pd(
self.to_df(
[["a", dt, 1], [None, dt, 2], [None, dt, 2], [None, None, 1]],
"a:str,b:datetime,ct:long",
)
),
self.to_pd(res),
)
schema = Schema("a:str,b:date,ct:long").pa_schema
dt = date(2020, 1, 1)
df = self.to_df(
[["a", dt], [None, dt], [None, dt], [None, None]],
"a:str,b:date",
)
res = self.utils.sql_groupby_apply(
df, ["a", "b"], _m1, output_schema=schema
)
self.utils.ensure_compatible(res)
assert_pdf_eq(
self.to_pd(
self.to_df(
[["a", dt, 1], [None, dt, 2], [None, dt, 2], [None, None, 1]],
"a:str,b:date,ct:long",
)
),
self.to_pd(res),
)
dt = date(2020, 1, 1)
df = self.to_df(
[["a", dt], ["b", dt], ["b", dt], ["b", None]],
"a:str,b:date",
)
res = self.utils.sql_groupby_apply(
df, ["a", "b"], _m1, output_schema=schema
)
self.utils.ensure_compatible(res)
assert_pdf_eq(
self.to_pd(
self.to_df(
[["a", dt, 1], ["b", dt, 2], ["b", dt, 2], ["b", None, 1]],
"a:str,b:date,ct:int",
)
),
self.to_pd(res),
)
def test_drop_duplicates(self):
def assert_eq(df1, df2):
d1 = self.to_pd(self.utils.drop_duplicates(df1))
assert_pdf_eq(d1, df2, check_order=False)
a = self.to_df([["x", "a"], ["x", "a"], [None, None]], ["a", "b"])
assert_eq(a, pd.DataFrame([["x", "a"], [None, None]], columns=["a", "b"]))
def test_drop_duplicates_sql(self):
df = make_rand_df(100, a=int, b=int)
assert_duck_eq(
self.to_pd(self.utils.drop_duplicates(self.to_df(df))),
"SELECT DISTINCT * FROM a",
a=df,
check_order=False,
)
df = make_rand_df(100, a=(int, 50), b=(int, 50))
assert_duck_eq(
self.to_pd(self.utils.drop_duplicates(self.to_df(df))),
"SELECT DISTINCT * FROM a",
a=df,
check_order=False,
)
df = make_rand_df(100, a=(int, 50), b=(str, 50), c=float)
assert_duck_eq(
self.to_pd(self.utils.drop_duplicates(self.to_df(df))),
"SELECT DISTINCT * FROM a",
a=df,
check_order=False,
)
df = make_rand_df(100, a=(int, 50), b=(datetime, 50), c=float)
assert_duck_eq(
self.to_pd(self.utils.drop_duplicates(self.to_df(df))),
"SELECT DISTINCT * FROM a",
a=df,
check_order=False,
)
def test_union(self):
def assert_eq(df1, df2, unique, expected, expected_cols):
res = self.to_pd(self.utils.union(df1, df2, unique=unique))
assert_pdf_eq(
res,
completion: pd.DataFrame(expected, columns=expected_cols)
api: pandas.DataFrame
# ********************************************************************************** #
# #
# Project: FastClassAI workbecnch #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import cv2
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advance plots, for statistics,
import matplotlib as mpl # to get some basic functions, helping with plot making
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt # for making plots,
from PIL import Image, ImageDraw
import matplotlib.gridspec
from scipy.spatial import distance
from scipy.cluster import hierarchy
from matplotlib.font_manager import FontProperties
from scipy.cluster.hierarchy import leaves_list, ClusterNode, leaders
from sklearn.metrics import accuracy_score
import graphviz # allows visualizing decision trees,
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ParameterGrid
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier # accepts only numerical data
from sklearn.tree import export_graphviz
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
# Function, .............................................................................
def find_different_filetypes(*,
subsets_dict,
filetypes_dict,
path,
verbose=False
):
"""
A generaric Function that allows to find files that:
* are grouped together, eg by subset name like train, test, valid
* have the same core name, but different affix
This function to build new, logfile for data encoding - this aoows adding
ASSUMPTION: coprresponding batch labels and extracted features have the same file name
except for affix, _encoded.npy and _labels.csv
# Inputs:
. subsets_dict : dict,
<key> : str, name of the group of files eg: test, train, etc..
<value> : str, part of the pattern a apttern that allows to find all files belonging to one group
important: in case more then one pattern must be used to identify files form one group,
just name them with numbers, and later on replace in df returned by the function,
. filetypes_dict : dict,
<key> : str, name of affix added to the file of a given type
<value> : str, of affix added to the file of a given type
. path : full path to directory with fileas searched by the function,
. verbose : bool,
# returns
. dataFrame : df, where each row represents files form one group, wiht one core name,
eg: test_batch_01, test_batch_02 etc..., and rows names after filetypes_dict keys
have corresponding filetypes, eg:
test_batch_01_features.npy, test_batch_01_labels.csv,
additional columns shows also the path, and subset_type (key from subsets_dict)
# Note
the function find all files that mach any cobination of the following pattern
> f'{subsets_dict[<key>]}*filetypes_dict[<key>]'
"""
os.chdir(path)
filename_table_list = [] # one table with all file names,
# ...
for i, subset_name in enumerate(list(subsets_dict.keys())):
" subset_file_name_pat may allow finidng one, or many different files wiht eg 01, 02, ... 0n numbers or other designations "
" these shodul be "
# ........................................................................................
# get pattern used to find files from a given data subset/group
subset_pat = subsets_dict[subset_name] # pat may be str or a list with >=1 str,
first_filetype = filetypes_dict[list(filetypes_dict.keys())[0]]
# ........................................................................................
        # step 1. find the first file, to later on find any other file types in the same order
one_subset_corename_with_one_filetype_list = []
        # if there is a list, it means more than one pattern was matching a given subset,
if isinstance(subset_pat, list):
for one_subset_pat in subset_pat:
for file in glob.glob(f"{one_subset_pat}*{first_filetype}"):
one_subset_corename_with_one_filetype_list.append(file)
else:
for file in glob.glob(f"{subset_pat}*{first_filetype}"):
one_subset_corename_with_one_filetype_list.append(file)
# ........................................................................................
# step 2. find all different types of associated files defined by different file_affix_pat
""" LIMITATION: these different types of files shdoul be in the same directory
"""
        # .. test if anything could be found
if len(one_subset_corename_with_one_filetype_list)==0:
if verbose==True:
print(f"{subset_name} - No files were found using provided subset_pat_list & filetype_pat_list[0]")
else:
pass
            pass # because there is nothing to continue with, and I don't want to stop on that
else:
if verbose==True:
print(f"{subset_name} - {len(one_subset_corename_with_one_filetype_list)} files were found, at least for the first filetype")
else:
pass
# .. remove affix, and create core file names that can be used to find other types of files,
""" and create list of core files that can be used to search
for different types of files for each item in that list
"""
one_subset_corename_list = pd.Series(one_subset_corename_with_one_filetype_list).str.split(first_filetype, expand=True).iloc[:, 0].values.tolist()
# .. search filtypes for all core names,
for one_file_corename in one_subset_corename_list:
# .... now find all filetypes with the same corename (one by one),
one_corename_filetypenames_dict = dict()
for filetype_name in list(filetypes_dict.keys()):
# - get patter used to filnd one filetype,
filetype_pat = filetypes_dict[filetype_name]
# - search for ONE_FILE_NAME
                    ONE_FILE_NAME = [] # there should be at least one!
for file in glob.glob(f"{one_file_corename}*{filetype_pat}"):
ONE_FILE_NAME.append(file)
                    # - test if you can find only one name; if not, the patterns provided are not specific enough
if verbose==True:
                        if len(ONE_FILE_NAME)==0:
print(f"Error - FILE NOT FOUND: {f'{one_file_corename}*{filetype_pat}'}")
if len(ONE_FILE_NAME)==1:
"everything is ok"
pass
if len(ONE_FILE_NAME)>1:
print(f"Error: provided combination of - {file_core_name} - and - {file_affix_pat}- is not speciffic enought !!!")
print("Error: in results more then one file was found and now only the first one will be loaded")
else:
pass
                    # .. add that file to the dict with associated files,
one_corename_filetypenames_dict[filetype_name] = ONE_FILE_NAME[0]
                # .... finally, add each group of associated files with the same core file name to filename_table
"ie. build table row"
filename_table_list.append({
"subset_name": subset_name,
"path": path,
**one_corename_filetypenames_dict
})
return pd.DataFrame(filename_table_list)
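# Usage sketch (not from the original project): the subset patterns and file affixes below are
# hypothetical examples of the inputs described in the docstring above; adjust them to the
# actual file naming scheme before calling this helper on a real directory.
def _example_find_different_filetypes(path):
    example_subsets = {"train": "train", "valid": "valid", "test": "test"} # hypothetical subset patterns
    example_filetypes = {"features": "_encoded.npy", "labels": "_labels.csv"} # hypothetical affixes
    return find_different_filetypes(
        subsets_dict=example_subsets,
        filetypes_dict=example_filetypes,
        path=path,
        verbose=True,
    )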
# Function, ......................................................................
# working version ...... 2020.12.11 ----- finally !!!!!!!!!!!!!!!
# Function, ......................................................................
def pair_files(*, search_patterns, pair_files_with, allow_duplicates_between_subsets=False, verbose=False, track_progres=False):
'''
    function to find lists of specific files, or pairs or groups of associated files,
    eg: a batch of images and their labels, which can have different formats and be in different locations,
    One file type is described with a so-called corefilename and subset types that allow grouping the files,
    and searching for other, associated files using that corefilename and the provided filename prefixes and extensions,
    done: 2020.12.10
    # inputs
    . search_patterns : dict, see example below
    . pair_files_with : the type of file that is paired with the other filetypes
    . allow_duplicates_between_subsets : bool, if False, the function will stop on a subsets collection
                                         that assigned the same leading filenames to different subsets
    . verbose : bool,
    . track_progres : bool, like verbose, but printing only minimal info on the process going on
    # returns:
    . dictionary with DataFrames : dict, key==Datasubsets collection
                                   values=pd.DataFrame, with paired file_name's and file_path's
                                   and col: subset_name that allows separating different subsets in one df
                                   the df also contains several other values that can help creating
                                   new derivative files
# Example
search_patterns = {
"all_data":{ # one datasets collection will create one dataframe,
"extracted_features":{
"file_path":PATH_extracted_features,
"file_prefix": f'{module_name}_{dataset_name}_{dataset_name}',
"file_extension": "_encoded.npy",
"file_corename": {
"train": f"_", # this will return several duplicates in train data
"valid": f"_valid_batch",
"test": f"_test_01",
"test_2": f"_test_02" # you may add more then one in a list !
}},
"labels":{
"file_path":None,
"file_prefix": None,
"file_extension": "labels.csv"
},
},
"partial_data":{ # one datasets collection will create one dataframe,
"extracted_features":{
"file_path":PATH_extracted_features,
"file_prefix": f'{module_name}_{dataset_name}_{dataset_name}',
"file_extension": "_encoded.npy",
"file_corename": {
"train": [f"_train_batch01", f"_train_batch02",f"_train_batch03",f"_train_batch03",f"_train_batch03"],
"valid": f"_valid_batch01",
"test": f"_test_01"
}},
"labels":{
"file_path":None,
"file_prefix": None,
"file_extension": "labels.csv"
},
}
}
# .......
    df = pair_files(
search_patterns=search_patterns,
pair_files_with="extracted_features",
verbose=True)
'''
    STOP_LOOP = False # if True after some checks, the function stops execution and returns None
subsets_collection_list = list(search_patterns.keys()) # used separately, and returned as dict with different tables,
    compare_all_files_to = pair_files_with # legacy issue, I changed the name to make it more informative
    paired_filenames_dict = dict() # keys==collection of subsets, values = table with paired filenames/paths and name of the datasubset
# -------------------------------------------------------------------------------
# create one df table per collection of subsets,
for subsets_collection_name in list(search_patterns.keys()):
if track_progres==True:
print("* Preparing: ", subsets_collection_name, " - from - ", subsets_collection_list)
else:
pass
# -------------------------------------------------------------------------------
# Step 1. search filenames of the first filetype (compare_all_files_to !)
# -------------------------------------------------------------------------------
'''
        here the df is created with all items such as subsets_collection_name, & one_subset_name,
        that will allow identifying the file without problems,
'''
# - list with subset names to loop over,
subset_name_list_in_one_collection = list(search_patterns[subsets_collection_name][compare_all_files_to]["file_corename"].keys())
# - list to store results on one subset collection (one entry == one file)
one_subset_collection_file_list = list()
# - loop over each subset
for i, one_subset_name in enumerate(subset_name_list_in_one_collection):
# **** STEP 1 **** parameters, ,
# .... get variables provided as parameters to the function for one_subset_name,
file_path = search_patterns[subsets_collection_name][compare_all_files_to]["file_path"] # str
file_prefix = search_patterns[subsets_collection_name][compare_all_files_to]["file_prefix"] # str
file_extension = search_patterns[subsets_collection_name][compare_all_files_to]["file_extension"] # str
file_corename = search_patterns[subsets_collection_name][compare_all_files_to]["file_corename"][one_subset_name] # str/list
# .... ensure that corename is a list, (can also be provided as str, with one pattern)
if isinstance(file_corename, str)==True:
file_corename = [file_corename]
else:
pass
# **** STEP 2 **** get filenames,
# .... set dir,
try:
os.chdir(file_path)
except:
if verbose==True:
print(f"ERROR incorrect path provided for {compare_all_files_to}")
else:
pass
# .... identify all files in one subset from that subsets collection
'all files found with all patterns added to the same list'
found_file_name_list = []
for one_file_corename in file_corename:
for file in glob.glob(f"{file_prefix}*{one_file_corename}*{file_extension}"):
found_file_name_list.append(file)
# ... ensure there are no repeats in found_file_name_list
found_file_name_list = pd.Series(found_file_name_list).unique().tolist()
            # **** STEP 3 **** get the file-specific corename and place all results in a dict in the list
# .... create a file_speciffic_corename
file_speciffic_corename_s = pd.Series(found_file_name_list)
file_speciffic_corename_s = file_speciffic_corename_s.str.replace(file_prefix, "")
file_speciffic_corename_s = file_speciffic_corename_s.str.replace(file_extension, "")
# .... add each file into one_subset_collection_file_list
for file_name, filespeciffic_corename in zip(found_file_name_list, file_speciffic_corename_s):
one_subset_collection_file_list.append({
"subsets_collection_name": subsets_collection_name,
"subset_name": one_subset_name,
f"{compare_all_files_to}_file_name": file_name,
f"{compare_all_files_to}_file_path":file_path,
f"{compare_all_files_to}_file_prefix": file_prefix,
f"{compare_all_files_to}_file_corename":file_corename,
f"{compare_all_files_to}_file_extension":file_extension,
f"{compare_all_files_to}_filespeciffic_corename":filespeciffic_corename,
})
# -------------------------------------------------------------------------------
        # Step 2. test if all file_names are unique and were not used in multiple subsets
# -------------------------------------------------------------------------------
        'caution: this may be done intentionally'
        # - get all filenames in a given collection of subsets, - duplicates can be the same files listed for 2 different subsets,
collected_filenames = pd.DataFrame(one_subset_collection_file_list).loc[:, f"{compare_all_files_to}_file_name"]
        # - test if all are unique,
if collected_filenames.shape[0]!=len(collected_filenames.unique().tolist()):
if allow_duplicates_between_subsets==False:
STOP_LOOP = True
else:
STOP_LOOP = False # duplicates are not a problem :)
pass
if track_progres==True:
print("ERROR, corename duplicates were detected in", subsets_collection_name, " -> function has been stoppped")
else:
pass
# .... print info with examples and placement of duplicates,
if verbose==True:
                # identify and print all filenames that are in more than one subset in one collection,
temp_df =
completion: pd.DataFrame(one_subset_collection_file_list)
api: pandas.DataFrame
# Copyright (c) 2018-2020, NVIDIA CORPORATION.
from __future__ import division
import operator
import random
from itertools import product
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core import Series
from cudf.core.index import as_index
from cudf.tests import utils
from cudf.utils.dtypes import (
BOOL_TYPES,
DATETIME_TYPES,
FLOAT_TYPES,
INTEGER_TYPES,
TIMEDELTA_TYPES,
)
STRING_TYPES = {"str"}
_binops = [
operator.add,
operator.sub,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("binop", _binops)
def test_series_binop(binop, obj_class):
nelem = 1000
arr1 = utils.gen_rand("float64", nelem) * 10000
# Keeping a low value because CUDA 'pow' has 2 full range error
arr2 = utils.gen_rand("float64", nelem) * 10
sr1 = Series(arr1)
sr2 = Series(arr2)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = binop(sr1, sr2)
expect = binop(pd.Series(arr1),
completion: pd.Series(arr2)
api: pandas.Series
from __future__ import print_function
import os
import shutil
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import openmdao.api as om
from wisdem.rotorse.rotor import RotorSE, Init_RotorSE_wRefBlade
from wisdem.rotorse.rotor_geometry_yaml import ReferenceBlade
from wisdem.assemblies.fixed_bottom.monopile_assembly_turbine_nodrive import MonopileTurbine
from wisdem.aeroelasticse.FAST_reader import InputReader_Common, InputReader_OpenFAST, InputReader_FAST7
from generateTables import RWT_Tabular
# Global inputs and outputs
ontology_dir = os.path.dirname( os.path.dirname( os.path.realpath(__file__)) ) + os.sep + 'WT_Ontology'
fname_schema = ontology_dir + os.sep + 'IEAontology_schema.yaml'
fname_input = ontology_dir + os.sep + 'IEA-15-240-RWT_FineGrid.yaml'
fname_output = ontology_dir + os.sep + 'IEA-15-240-RWT_out.yaml'
folder_output = os.getcwd() + os.sep + 'outputs'
if not os.path.isdir(folder_output):
os.mkdir(folder_output)
def initialize_problem(Analysis_Level):
# Initialize blade design
refBlade = ReferenceBlade()
refBlade.verbose = True
refBlade.NINPUT = 200
Nsection_Tow = 19
refBlade.NPTS = 200
refBlade.spar_var = ['Spar_cap_ss', 'Spar_cap_ps'] # SS, then PS
refBlade.te_var = 'TE_reinforcement'
refBlade.validate = False
refBlade.fname_schema = fname_schema
blade = refBlade.initialize(fname_input)
FASTpref = {}
FASTpref['Analysis_Level'] = Analysis_Level
# Set FAST Inputs
if Analysis_Level >= 1:
# File management
FASTpref['FAST_ver'] = 'OpenFAST'
FASTpref['dev_branch'] = True
FASTpref['FAST_exe'] = '~/local/bin/openfast'
FASTpref['FAST_directory'] = '../OpenFAST' # Path to fst directory files
FASTpref['FAST_InputFile'] = 'IEA-15-240-RWT.fst' # FAST input file (ext=.fst)
FASTpref['Turbsim_exe'] = '~/local/bin/turbsim'
FASTpref['FAST_namingOut'] = 'IEA-15-240-RWT'
FASTpref['FAST_runDirectory'] = 'temp/' + FASTpref['FAST_namingOut']
# Run Settings
FASTpref['cores'] = 1
FASTpref['debug_level'] = 2 # verbosity: set to 0 for quiet, 1 & 2 for increasing levels of output
# DLCs
FASTpref['DLC_gust'] = None # Max deflection
# FASTpref['DLC_gust'] = RotorSE_DLC_1_4_Rated # Max deflection ### Not in place yet
FASTpref['DLC_extrm'] = None # Max strain
# FASTpref['DLC_extrm'] = RotorSE_DLC_7_1_Steady # Max strain ### Not in place yet
FASTpref['DLC_turbulent'] = None
# FASTpref['DLC_turbulent'] = RotorSE_DLC_1_1_Turb # Alternate turbulent case, replacing rated and extreme DLCs for calculating max deflection and strain
FASTpref['DLC_powercurve'] = None # AEP
# FASTpref['DLC_powercurve'] = None # AEP
# Initialize, read initial FAST files to avoid doing it iteratively
fast = InputReader_OpenFAST(FAST_ver=FASTpref['FAST_ver'], dev_branch=FASTpref['dev_branch'])
fast.FAST_InputFile = FASTpref['FAST_InputFile']
fast.FAST_directory = FASTpref['FAST_directory']
fast.execute()
fst_vt = fast.fst_vt
else:
fst_vt = {}
prob = om.Problem()
prob.model=MonopileTurbine(RefBlade=blade, Nsection_Tow=Nsection_Tow, VerbosityCosts=False, FASTpref=FASTpref)
prob.model.nonlinear_solver = om.NonlinearRunOnce()
prob.model.linear_solver = om.DirectSolver()
return prob, blade, fst_vt
def initialize_variables(prob, blade, Analysis_Level, fst_vt):
# Initialize variable inputs
prob.setup()
prob = Init_RotorSE_wRefBlade(prob, blade, Analysis_Level = Analysis_Level, fst_vt = fst_vt)
# Environmental parameters for the tower
prob['significant_wave_height'] = 4.52
prob['significant_wave_period'] = 9.45
prob['water_depth'] = 30.
prob['wind_reference_height'] = prob['hub_height'] = 150.
prob['shearExp'] = 0.11
prob['rho'] = 1.225
prob['mu'] = 1.7934e-5
prob['water_density'] = 1025.0
prob['water_viscosity'] = 1.3351e-3
prob['wind_beta'] = prob['wave_beta'] = 0.0
prob['gust_stddev'] = 3
# Steel properties for the tower
prob['material_density'] = 7850.0
prob['E'] = 210e9
prob['G'] = 79.3e9
prob['yield_stress'] = 345e6
prob['soil_G'] = 140e6
prob['soil_nu'] = 0.4
# Design constraints
prob['max_taper_ratio'] = 0.4
prob['min_diameter_thickness_ratio'] = 120.0
# Safety factors
prob['gamma_fatigue'] = 1.755 # (Float): safety factor for fatigue
prob['gamma_f'] = 1.35 # (Float): safety factor for loads/stresses
prob['gamma_m'] = 1.3 # (Float): safety factor for materials
prob['gamma_freq'] = 1.1 # (Float): safety factor for resonant frequencies
prob['gamma_n'] = 1.0
prob['gamma_b'] = 1.1
# Tower
prob['tower_buckling_length'] = 30.0
prob['tower_outfitting_factor'] = 1.07
prob['foundation_height'] = -30.
prob['suctionpile_depth'] = 45.
prob['tower_section_height'] = np.array([5., 5., 5., 5., 5., 5., 5., 5., 5., 13., 13., 13., 13., 13., 13., 13., 13., 13., 12.58244309])
prob['tower_outer_diameter'] = np.array([10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 9.92647687, 9.44319282, 8.83283769, 8.15148167, 7.38976138, 6.90908962, 6.74803581, 6.57231775, 6.5])
prob['tower_wall_thickness'] = np.array([0.05534138, 0.05344902, 0.05150928, 0.04952705, 0.04751736, 0.04551709, 0.0435267, 0.04224176, 0.04105759, 0.0394965, 0.03645589, 0.03377851, 0.03219233, 0.03070819, 0.02910109, 0.02721289, 0.02400931, 0.0208264, 0.02399756])
prob['tower_buckling_length'] = 15.0
prob['transition_piece_mass'] = 100e3
prob['transition_piece_height'] = 15.0
prob['DC'] = 80.0
prob['shear'] = True
prob['geom'] = True
prob['tower_force_discretization'] = 5.0
prob['nM'] = 2
prob['Mmethod'] = 1
prob['lump'] = 0
prob['tol'] = 1e-9
prob['shift'] = 0.0
# Offshore BOS
prob['wtiv'] = 'example_wtiv'
prob['feeder'] = 'future_feeder'
prob['num_feeders'] = 1
prob['oss_install_vessel'] = 'example_heavy_lift_vessel'
prob['site_distance'] = 40.0
prob['site_distance_to_landfall'] = 40.0
prob['site_distance_to_interconnection'] = 40.0
prob['plant_turbine_spacing'] = 7
prob['plant_row_spacing'] = 7
prob['plant_substation_distance'] = 1
prob['tower_deck_space'] = 0.
prob['nacelle_deck_space'] = 0.
prob['blade_deck_space'] = 0.
prob['port_cost_per_month'] = 2e6
prob['monopile_deck_space'] = 0.
prob['transition_piece_deck_space'] = 0.
prob['commissioning_pct'] = 0.01
prob['decommissioning_pct'] = 0.15
prob['project_lifetime'] = prob['lifetime'] = 20.0
prob['number_of_turbines'] = 40
prob['annual_opex'] = 43.56 # $/kW/yr
prob['tower_add_gravity'] = True
# For turbine costs
prob['offshore'] = True
prob['crane'] = False
prob['crane_cost'] = 0.0
prob['labor_cost_rate'] = 3.0
prob['material_cost_rate'] = 2.0
prob['painting_cost_rate'] = 28.8
# Drivetrain
prob['bearing_number'] = 2
prob['tilt'] = 6.0
prob['overhang'] = 10.99
prob['hub_cm'] = np.array([-10.604, 0.0, 5.462])
prob['nac_cm'] = np.array([-3.946, 0.0, 3.538])
prob['hub_I'] = np.array([1382171.187, 2169261.099, 2160636.794, 0.0, 0.0, 0.0])
prob['nac_I'] = np.array([7918328., 4751108., 5314813., 0.0, 0.0, 0.0])
prob['hub_mass'] = 190e3
prob['nac_mass'] = 6.309e5
prob['hss_mass'] = 0.0
prob['lss_mass'] = 15734.0
prob['cover_mass'] = 0.0
prob['pitch_system_mass'] = 0.0
prob['platforms_mass'] = 11393 + 2*1973.
prob['spinner_mass'] = 0.0
prob['transformer_mass'] = 50e3
prob['vs_electronics_mass'] = 0.0
prob['yaw_mass'] = 100e3
prob['gearbox_mass'] = 0.0
prob['generator_mass'] = 226628.6 + 144963.1
prob['bedplate_mass'] = 70328.7
prob['main_bearing_mass'] = 5664
return prob
def run_problem(prob):
# Run initial condition no matter what
print('Running at Initial Position:')
prob['gust_stddev'] = 0
prob.run_model()
print('########################################')
print('')
print('Control variables')
print('Rotor diam: {:8.3f} m'.format(prob['diameter'][0]))
print('TSR: {:8.3f} -'.format(prob['control_tsr'][0]))
print('Rated vel: {:8.3f} m/s'.format(prob['rated_V'][0]))
print('Rated rpm: {:8.3f} rpm'.format(prob['rated_Omega'][0]))
print('Rated pitch: {:8.3f} deg'.format(prob['rated_pitch'][0]))
print('Rated thrust: {:8.3f} N'.format(prob['rated_T'][0]))
print('Rated torque: {:8.3f} N-m'.format(prob['rated_Q'][0]))
print('')
print('Constraints')
print('Max TD: {:8.3f} m'.format(prob['tip_deflection'][0]))
print('TD ratio: {:8.3f} -'.format(prob['tip_deflection_ratio'][0]))
print('Blade root M: {:8.3f} N-m'.format(prob['root_bending_moment'][0]))
print('')
print('Objectives')
print('AEP: {:8.3f} GWh'.format(prob['AEP'][0]))
print('LCoE: {:8.4f} $/MWh'.format(prob['lcoe'][0]))
print('')
print('Blades')
print('Blade mass: {:8.3f} kg'.format(prob['mass_one_blade'][0]))
print('Blade cost: {:8.3f} $'.format(prob['total_blade_cost'][0]))
print('Blade freq: {:8.3f} Hz'.format(prob['freq_curvefem'][0]))
print('3 blade M_of_I: ', prob['I_all_blades'], ' kg-m^2')
print('Hub F: ', 1e-3*prob['Fxyz_total'], ' kN')
print('Hub M: ', 1e-3*prob['Mxyz_total'], ' kNm')
print('')
print('RNA Summary')
print('RNA mass: {:8.3f} kg'.format(prob['tow.pre.mass'][0]))
print('RNA C_of_G (TT): ', prob['rna_cg'], ' m')
print('RNA M_of_I: ', prob['tow.pre.mI'], ' kg-m^2')
print('')
print('Tower')
print('Tower top F: ', prob['tow.pre.rna_F'], ' N')
print('Tower top M: ', prob['tow.pre.rna_M'], ' N-m')
print('Tower freqs: ', prob['tow.post.structural_frequencies'], ' Hz')
    print('Tower vel:     {:8.3f} m/s'.format(prob['tow.wind.Uref'][0]))
print('Tower mass: {:8.3f} kg'.format(prob['tower_mass'][0]))
print('Tower cost: {:8.3f} $'.format(prob['tower_cost'][0]))
print('Monopile mass: {:8.3f} kg'.format(prob['monopile_mass'][0]))
print('Monopile cost: {:8.3f} $'.format(prob['monopile_cost'][0]))
print('########################################')
# Complete data dump
#prob.model.list_inputs(units=True)
#prob.model.list_outputs(units=True)
return prob
def postprocess(prob, blade):
def format_save(fig, fig_name):
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.grid(color=[0.8,0.8,0.8], linestyle='--')
plt.subplots_adjust(bottom = 0.15, left = 0.15)
fig.savefig(folder_output + os.sep + fig_name+'.pdf', pad_inches=0.1, bbox_inches='tight')
fig.savefig(folder_output + os.sep + fig_name+'.png', pad_inches=0.1, bbox_inches='tight')
# Problem initialization
var_y = ['chord','theta','rthick','p_le','precurve','presweep']
label_y = ['Chord [m]', 'Twist [deg]', 'Relative Thickness [%]', 'Pitch Axis Chord Location [%]', 'Prebend [m]', 'Sweep [m]']
scaling_factor = [1. , 1. , 100. , 100., 1., 1.]
figsize=(5.3, 4)
fig = plt.figure(figsize=figsize)
for i in range(len(var_y)):
fig.clf()
ax = fig.add_subplot(111)
ax.plot(blade['pf']['r'], blade['pf'][var_y[i]] * scaling_factor[i], 'k', linewidth=2)
plt.xlabel('Blade Span [m]', fontsize=14, fontweight='bold')
plt.ylabel(label_y[i], fontsize=14, fontweight='bold')
fig_name = var_y[i] + '_dimensional'
format_save(fig, fig_name)
fig.clf()
ax = fig.add_subplot(111)
ax.plot(blade['pf']['s'], blade['pf'][var_y[i]] * scaling_factor[i], 'k', linewidth=2)
plt.xlabel('Nondimensional Blade Span (r/R)', fontsize=14, fontweight='bold')
plt.ylabel(label_y[i], fontsize=14, fontweight='bold')
fig_name = var_y[i] + '_nondimensional'
format_save(fig, fig_name)
# Pitch
fig.clf()
ax = fig.add_subplot(111)
ax.plot(prob['V'], prob['pitch'], linewidth=2)
plt.xlabel('Wind [m/s]', fontsize=14, fontweight='bold')
plt.ylabel('Pitch Angle [deg]', fontsize=14, fontweight='bold')
fig_name = 'pitch'
format_save(fig, fig_name)
# Power curve
fig.clf()
ax = fig.add_subplot(111)
ax.plot(prob['V'], prob['P'] * 1.00e-006, linewidth=2)
plt.xlabel('Wind [m/s]', fontsize=14, fontweight='bold')
plt.ylabel('Electrical Power [MW]', fontsize=14, fontweight='bold')
plt.yticks(np.arange(16))
fig_name = 'power'
format_save(fig, fig_name)
# ELEC Coefficient of Power curve
fig.clf()
ax = fig.add_subplot(111)
ax.plot(prob['V'], prob['Cp'], linewidth=2)
plt.xlabel('Wind [m/s]', fontsize=14, fontweight='bold')
plt.ylabel('Power Coefficient', fontsize=14, fontweight='bold')
plt.yticks(1e-2*np.arange(0, 51, 5))
fig_name = 'coefficient_power'
format_save(fig, fig_name)
# AERO Coefficient of Power curve
fig.clf()
ax = fig.add_subplot(111)
ax.plot(prob['V'], prob['Cp_aero'], linewidth=2)
plt.xlabel('Wind [m/s]', fontsize=14, fontweight='bold')
plt.ylabel('Power Coefficient', fontsize=14, fontweight='bold')
plt.yticks(1e-2*np.arange(0, 51, 5))
fig_name = 'coefficient_power_aero'
format_save(fig, fig_name)
# Omega
fig.clf()
ax = fig.add_subplot(111)
ax.plot(prob['V'], prob['Omega'], linewidth=2)
plt.xlabel('Wind [m/s]', fontsize=14, fontweight='bold')
plt.ylabel('Rotor Speed [rpm]', fontsize=14, fontweight='bold')
fig_name = 'omega'
format_save(fig, fig_name)
# Tip speed
fig.clf()
ax = fig.add_subplot(111)
ax.plot(prob['V'], prob['Omega'] * np.pi / 30. * prob['r'][-1], linewidth=2)
plt.xlabel('Wind [m/s]', fontsize=14, fontweight='bold')
plt.ylabel('Blade Tip Speed [m/s]', fontsize=14, fontweight='bold')
fig_name = 'tip_speed'
format_save(fig, fig_name)
# Thrust
fig.clf()
ax = fig.add_subplot(111)
ax.plot(prob['V'], prob['T'] * 1.00e-006, linewidth=2)
plt.xlabel('Wind [m/s]', fontsize=14, fontweight='bold')
plt.ylabel('Rotor Thrust [MN]', fontsize=14, fontweight='bold')
fig_name = 'thrust'
format_save(fig, fig_name)
# Coefficient Thrust
fig.clf()
ax = fig.add_subplot(111)
ax.plot(prob['V'], prob['Ct_aero'], linewidth=2)
plt.xlabel('Wind [m/s]', fontsize=14, fontweight='bold')
plt.ylabel('Thrust Coefficient', fontsize=14, fontweight='bold')
plt.yticks(1e-1*np.arange(0, 8.1))
fig_name = 'coefficient_thrust'
format_save(fig, fig_name)
# Torque
fig.clf()
ax = fig.add_subplot(111)
ax.plot(prob['V'], prob['Q'] * 1.00e-006, linewidth=2)
plt.xlabel('Wind [m/s]', fontsize=14, fontweight='bold')
plt.ylabel('Rotor Torque [MNm]', fontsize=14, fontweight='bold')
fig_name = 'torque'
format_save(fig, fig_name)
# Tabular output: Blade
temp = np.c_[blade['pf']['s'], blade['pf']['r']]
for iy,y in enumerate(var_y):
temp = np.c_[temp, blade['pf'][y]*scaling_factor[iy]]
bladeDF =
completion: pd.DataFrame(data=temp, columns=['Blade Span','Rotor Coordinate [m]'] + label_y)
api: pandas.DataFrame
import json
import os
import sqlite3 as sql
import time
from math import sqrt
import numpy as np
import pandas as pd
import requests
import streamlit as st
import src.frontend.page_handling as page_handling
from src.frontend.dataset import DatasetWrapper
from .SessionState import session_get
from .util import timer
import os
import math
def is_item(item, all_items):
if item == 0:
return False
if "into" in all_items[str(item)]:
return all_items[str(item)]["into"] == []
if all_items[str(item)]["gold"]["purchasable"] == True:
return all_items[str(item)]["gold"]["total"] != 0
return False
"""
Taken from https://www.mikulskibartosz.name/wilson-score-in-python-example
"""
def confidence_wilson_score(p, n, z=1.44):
denominator = 1 + z ** 2 / n
centre_adjusted_probability = p + z * z / (2 * n)
adjusted_standard_deviation = sqrt((p * (1 - p) + z * z / (4 * n)) / n)
lower_bound = (centre_adjusted_probability - z * adjusted_standard_deviation) / denominator
upper_bound = (centre_adjusted_probability + z * adjusted_standard_deviation) / denominator
return (lower_bound, upper_bound)
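# Worked example (illustration only, not part of the original app): for an observed win rate p
# over n games, confidence_wilson_score returns a (lower, upper) interval around p; with the
# default z=1.44 this corresponds to roughly an 85% confidence band.
def _example_wilson_interval():
    observed_win_rate = 0.55 # hypothetical win rate
    games_played = 200 # hypothetical sample size
    lower, upper = confidence_wilson_score(observed_win_rate, games_played)
    return lower, upper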
def add_item(lst, lst2, lst3, itm, counter, rating):
if itm != 0:
lst.append(itm)
lst2.append(counter)
lst3.append(rating)
def add_item_full(lst, items, counter, win, win_items, thresh, all_items):
for x in ["item0", "item1", "item2", "item3", "item4", "item5", "item6"]:
if not is_item(items[x], all_items):
continue
if win_items[items[x]][1] > thresh:
add_item(lst["itemID"], lst["userID"], lst["rating"], items[x], counter,
1 - (win_items[items[x]][0] / win_items[items[x]][1]))
else:
add_item(lst["itemID"], lst["userID"], lst["rating"], items[x], counter, 1)
def get_leading(average_win_rate, p):
if average_win_rate < p:
return 1
return -1
def coldcase_prediction(items, current_dict, rating, all_items):
for x in items:
if x != 0:
if x in current_dict:
current_dict[x] = (current_dict[x][0] + rating, current_dict[x][1] + 1)
else:
current_dict[x] = (rating, 1)
"""Taken from https://stackoverflow.com/a/16696317"""
def download_file(url, local, stream=True, verify=False):
local_filename = local
prog = st.progress(0)
st.write("Downloading database from public URL")
# NOTE the stream=True parameter below
with requests.get(url, stream=stream, verify=verify) as r:
r.raise_for_status()
total_size = 0
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=65565):
# If you have chunk encoded response uncomment if
# and set chunk_size parameter to None.
# if chunk:
f.write(chunk)
total_size += len(chunk)
prog.progress(int(total_size * 100 / 1000000000))
prog.progress(100)
return local_filename
@timer
def app():
with st.sidebar:
if st.button("Back"):
page_handling.handler.set_page("menu")
return
state = session_get()
st.title("The dataset")
st.write(
"The dataset was created by crawling through the official Game API. Some minimal filtering have been applied to the dataset to clean it up and increase significance. Since the game is subject to frequent patches and balance updates, the data is highly volatile and significance declines rather fast. Therefore only games played in the last 100 days have been considered as input.")
if not os.path.exists("data/lol_dataset/league_of_legends.db"):
download_file("https://www.kava-i.de/league_of_legends.db", "data/lol_dataset/league_of_legends.db")
if not os.path.exists("data/lol_dataset/champion.json"):
download_file("http://ddragon.leagueoflegends.com/cdn/11.11.1/data/en_US/champion.json",
"data/lol_dataset/champion.json")
if not os.path.exists("data/lol_dataset/item.json"):
download_file("http://ddragon.leagueoflegends.com/cdn/11.11.1/data/en_US/item.json",
"data/lol_dataset/items.json")
connection = sql.connect("data/lol_dataset/league_of_legends.db")
cursor = connection.cursor()
total_games = 0
max_game_length = 0
min_game_creation = 0
for row in cursor.execute("SELECT MAX(game_duration) from matches"):
max_game_length = row[0]
for row in cursor.execute("SELECT MIN(creation_time) from matches"):
min_game_creation = row[0]
min_game_creation = -math.floor((int(time.time()*1000) - min_game_creation)/(1000*60*60*24))-1
ok_side_form = st.sidebar.form(key='my_form')
game_length_min = ok_side_form.slider("Select the game length in minutes:",min_value=0,max_value=int(max_game_length/60),value=(0,10))
sample_range_days = ok_side_form.slider("Select the sample range in days:",min_value=min_game_creation,max_value=0,value=(min_game_creation,0))
multi = ok_side_form.multiselect("Selects the region in the world from which to sample the games",["EU West","EU Nord", "Nord America", "Russia","Latein America 1","Latein America 2"],["EU West","EU Nord", "Nord America", "Russia","Latein America 1","Latein America 2"],key="Unique3")
inv_mappings = { "https://euw1.api.riotgames.com":"EU West", "https://eun1.api.riotgames.com": "EU Nord" , "https://na1.api.riotgames.com":"Nord America", "https://la1.api.riotgames.com":"Latein America 1", "https://la2.api.riotgames.com":"Latein America 2","https://ru.api.riotgames.com":"Russia"}
for row in cursor.execute("SELECT COUNT(champion_id) from matches"):
total_games = row[0]
epoch_time = int(time.time()*1000)
mappings = {"EU West": "https://euw1.api.riotgames.com", "EU Nord": "https://eun1.api.riotgames.com", "Nord America": "https://na1.api.riotgames.com", "Latein America 1":"https://la1.api.riotgames.com", "Latein America 2": "https://la2.api.riotgames.com","Russia":"https://ru.api.riotgames.com"}
execute_string = '('
for x in multi:
if execute_string != '(':
execute_string+=" OR "
execute_string += "idx = \""+mappings[x]+"\""
execute_string+=")"
filtered = 0
cursor.execute("DROP VIEW IF EXISTS filtered_matches")
cursor.execute("CREATE VIEW filtered_matches AS SELECT * from matches WHERE game_duration >= {} AND game_duration <= {} AND creation_time >= {} AND creation_time <= {} AND {}".format(game_length_min[0]*60,game_length_min[1]*60,(epoch_time+sample_range_days[0]*60*60*24*1000),epoch_time+sample_range_days[1]*60*60*24*1000,execute_string))
data = []
data_2 = []
for row in cursor.execute("SELECT idx,COUNT(*) from filtered_matches GROUP BY idx"):
data.append(row[1])
data_2.append(inv_mappings[row[0]])
region = pd.DataFrame(data,index=data_2,columns=["Data points"])
st.bar_chart(region)
js = json.loads(open("src/lol_dataset/champion.json", "rb").read().decode(encoding='utf-8'))
champions = []
champion_dict = {}
for x in js["data"]:
champions.append(x)
champion_dict[x] = (int(js["data"][x]["key"]), js["data"][x]["blurb"])
data = []
data_2 = []
for row in cursor.execute("SELECT champion_id,COUNT(*) from filtered_matches GROUP BY champion_id ORDER BY Count(*) DESC LIMIT 20"):
data.append(row[1])
for x in champion_dict:
if row[0] == champion_dict[x][0]:
data_2.append(x)
champs = pd.DataFrame(data,index=data_2,columns=["Data points"])
st.bar_chart(champs)
data = []
data_2 = []
for row in cursor.execute("SELECT champion_id,COUNT(*) from filtered_matches GROUP BY champion_id ORDER BY Count(*) ASC LIMIT 20"):
data.append(row[1])
for x in champion_dict:
if row[0] == champion_dict[x][0]:
data_2.append(x)
champs = pd.DataFrame(data,index=data_2,columns=["Data points"])
st.bar_chart(champs)
for row in cursor.execute("SELECT COUNT(champion_id) from filtered_matches"):
st.write("A total of ",row[0]," games have been found in the database with the given filters.")
filtered = row[0]
st.write("This is ",round(100*filtered/total_games,2),"% of the whole dataset.")
st.title("Showcase")
col1, col2, col3 = st.beta_columns([1,1,1])
create_constrains = ""
col1, col3, col2 = st.beta_columns([3, 1, 3])
options = col1.selectbox('Select your champion', champions)
options2 = col2.selectbox('Select the enemy champion', champions)
lore_url = "http://ddragon.leagueoflegends.com/cdn/11.11.1/data/en_US/champion/{}.json".format(options)
lore_enemy_url = "http://ddragon.leagueoflegends.com/cdn/11.11.1/data/en_US/champion/{}.json".format(options2)
lore_own = json.loads(requests.get(lore_url).text)
lore_enemy = json.loads(requests.get(lore_enemy_url).text)
all_items = json.loads(open('src/lol_dataset/items.json').read())["data"]
item_dict = {}
all_item_names = []
for i in all_items:
all_items[i]["base_id"] = i
item_dict[i] = all_items[i]
item_dict[all_items[i]["name"]] = all_items[i]
all_item_names.append(all_items[i]["name"])
col1.image("http://ddragon.leagueoflegends.com/cdn/img/champion/loading/{}_0.jpg".format(options),
use_column_width="always")
with col1.beta_expander("See hero description"):
st.write(lore_own["data"][options]["lore"])
col2.image("http://ddragon.leagueoflegends.com/cdn/img/champion/loading/{}_0.jpg".format(options2),
use_column_width="always")
with col2.beta_expander("See hero description"):
st.write(lore_enemy["data"][options2]["lore"])
own_champ = int(champion_dict[options][0])
other_champ = int(champion_dict[options2][0])
counter = 0
anti_counter = 0
start_time = 0
duration = 0
prog = st.progress(0)
col1, col2, col3 = st.beta_columns([1, 4, 1])
frames = {"userID": [], "itemID": [], "rating": []}
winning_items = {}
running_counter = 0
with col2.empty():
prog.progress(0)
for row in cursor.execute("SELECT e.champion_id, e.items FROM filtered_matches e INNER JOIN filtered_matches m ON m.game_id = e.game_id WHERE e.win = 1 AND m.win=0 AND e.champion_id="+str(own_champ)+" AND m.champion_id="+str(other_champ)):
counter+=1
items = json.loads(row[1])
coldcase_prediction([items["item0"],items["item1"],items["item2"],items["item3"],items["item4"],items["item5"],items["item6"]],winning_items,1,all_items)
for row in cursor.execute("SELECT m.champion_id, e.items FROM filtered_matches e INNER JOIN filtered_matches m ON m.game_id = e.game_id WHERE e.win = 0 AND m.win=1 AND e.champion_id="+str(own_champ)+" AND m.champion_id="+str(other_champ)):
anti_counter+=1
items = json.loads(row[1])
coldcase_prediction(
[items["item0"], items["item1"], items["item2"], items["item3"], items["item4"], items["item5"],
items["item6"]], winning_items, 0, all_items)
prog.progress(100)
val = 0
if (counter + anti_counter) != 0:
winning_list = []
for x in winning_items:
if winning_items[x][1] >= val:
if not is_item(x, item_dict):
continue
estimated_p = winning_items[x][0] / winning_items[x][1]
cost = item_dict[str(x)]["gold"]["total"]
winning_list.append((estimated_p, x, cost))
games = []
for x in range(counter):
games.append(1)
for x in range(anti_counter):
games.append(0)
list_items_filtered = []
list_items_names = []
for x in winning_list:
list_items_filtered.append(x[0])
list_items_names.append(item_dict[str(x[1])]["name"])
outcomes = np.array(games)
name = ok_side_form.text_input("Name for the dataset:",key="Unique4")
submit_button2x = ok_side_form.form_submit_button(label='Apply')
submit_button = st.sidebar.button(label='Add to dataset')
if submit_button:
running_counter = 0
for row in cursor.execute("SELECT DISTINCT e.items, e.summoner_name, e.game_id FROM matches e INNER JOIN matches m ON m.game_id = e.game_id WHERE e.win = 1 AND m.win=0 AND e.champion_id="+str(own_champ)+" AND m.champion_id="+str(other_champ)):
running_counter+=1
items = json.loads(row[0])
add_item_full(frames,items,str(running_counter)+" | "+row[1],True,winning_items,val,item_dict)
for x in range(len(frames["itemID"])):
idx = item_dict[str(frames["itemID"][x])]["name"]
frames["itemID"][x] = idx
for x in range(len(frames["userID"])):
idx = item_dict[str(frames["itemID"][x])]["name"]
frames["itemID"][x] = idx
df =
completion: pd.DataFrame(frames)
api: pandas.DataFrame
# Author: <NAME>
# Date: 04/25/2019
# Git-Hub: Data-is-Life
from bs4 import BeautifulSoup
import gc
from numpy import where
from numpy import mean as NpM
from pandas import to_timedelta as ToTd
from pandas import to_datetime as ToDt
from pandas import to_numeric as ToNm
from pandas import DataFrame as DF
from pandas import concat
gc.enable()
class CleanerHelper(object):
def __init__(self):
pass
def reset_drop(self, df):
df.reset_index(inplace=True)
df.drop(columns=['index'], inplace=True)
return df
def sort_drop(self, df):
df.sort_values(by=['start_date', 'end_date'], inplace=True)
df.reset_index(inplace=True)
df.drop(columns=['index'], inplace=True)
return df
class TrimData(object):
def __init__(self, soup_obj, speed_unit, measure_unit):
super(TrimData, self).__init__()
self.soup_obj = soup_obj
self.speed_unit = speed_unit
self.measure_unit = measure_unit
self.one_day = ToTd(1, 'D')
self.two_days = ToTd(2, 'D')
self.twh = 24 - 1e-6
self.ch = CleanerHelper()
def parse_soup(self):
self.df = DF()
'''Insert the values in the original dataframe. Convert time to
numerical format to make calculations easier.'''
self.df.loc[:, 'start_date'] = [d['startDate'] for d in self.soup_obj]
self.df.loc[:, 'end_date'] = [d['endDate'] for d in self.soup_obj]
self.df.loc[:, self.measure_unit] = [d['value'] for d in self.soup_obj]
self.df.loc[:, 'source'] = [d['sourceName'] for d in self.soup_obj]
self.df.loc[
:, self.measure_unit] = self.df[self.measure_unit].astype(float)
return self.df
def clean_set(self):
'''Split start and end date, time, and timezone to a new df. Drop the
Timezone column.'''
start_date_df = self.df.start_date.str.split(
' ', expand=True).drop(columns=[2])
end_date_df = self.df.end_date.str.split(
' ', expand=True).drop(columns=[2])
# Merge the start and end date & time to a single value.
start_date_df.loc[:, 'std'] = start_date_df[0] + ' ' + start_date_df[1]
end_date_df.loc[:, 'etd'] = end_date_df[0] + ' ' + end_date_df[1]
# Convert the date to `datetime` and time to `timedelta`
start_date_df.loc[:, 'sd'] = ToDt(start_date_df[0], format='%Y-%m-%d')
end_date_df.loc[:, 'ed'] = ToDt(end_date_df[0], format='%Y-%m-%d')
start_date_df.loc[:, 'st'] = ToTd(start_date_df[1])
end_date_df.loc[:, 'et'] =
completion: ToTd(end_date_df[1])
api: pandas.to_timedelta
import os
import numpy as np
import pandas as pd
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_sum(df, window=10):
"""
Wrapper function to estimate rolling sum.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series sum over the past 'window' days.
"""
return df.rolling(window).sum()
def ts_prod(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).prod()
def sma(df, window=10): #simple moving average
"""
Wrapper function to estimate SMA.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series SMA over the past 'window' days.
"""
return df.rolling(window).mean()
def ema(df, n, m): #exponential moving average
"""
Wrapper function to estimate EMA.
:param df: a pandas DataFrame.
:return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
"""
result = df.copy()
for i in range(1,len(df)):
        result.iloc[i]= (m*df.iloc[i] + (n-m)*result.iloc[i-1]) / n
return result
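# Usage sketch (illustration only): with n=3 and m=1 the recursion above gives
# ema_t = (1/3)*a_t + (2/3)*ema_{t-1}, seeded with the first observation of the series.
def _example_ema():
    prices = pd.Series([10.0, 11.0, 12.0, 13.0]) # hypothetical price series
    return ema(prices, 3, 1)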
def wma(df, n):
"""
Wrapper function to estimate WMA.
:param df: a pandas DataFrame.
:return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1}
"""
weights = pd.Series(0.9*np.flipud(np.arange(1,n+1)))
result = pd.Series(np.nan, index=df.index)
for i in range(n-1,len(df)):
result.iloc[i]= sum(df[i-n+1:i+1].reset_index(drop=True)*weights.reset_index(drop=True))
return result
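# Usage sketch (illustration only): for n=3 the weight vector used above is 0.9*[3, 2, 1],
# so the most recent observation gets weight 0.9 and the oldest gets 2.7, matching the
# docstring formula.
def _example_wma():
    prices = pd.Series([10.0, 11.0, 12.0, 13.0, 14.0]) # hypothetical price series
    return wma(prices, 3)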
def stddev(df, window=10):
"""
Wrapper function to estimate rolling standard deviation.
:param df: a pandas DataFrame.
:param window: the rolling window.
    :return: a pandas DataFrame with the time-series standard deviation over the past 'window' days.
"""
return df.rolling(window).std()
def correlation(x, y, window=10):
"""
    Wrapper function to estimate rolling correlations.
    :param df: a pandas DataFrame.
    :param window: the rolling window.
    :return: a pandas DataFrame with the time-series correlation over the past 'window' days.
"""
return x.rolling(window).corr(y)
def covariance(x, y, window=10):
"""
Wrapper function to estimate rolling covariance.
:param df: a pandas DataFrame.
:param window: the rolling window.
    :return: a pandas DataFrame with the time-series covariance over the past 'window' days.
"""
return x.rolling(window).cov(y)
def rolling_rank(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The rank of the last value in the array.
"""
return rankdata(na)[-1]
def ts_rank(df, window=10):
"""
Wrapper function to estimate rolling rank.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series rank over the past window days.
"""
return df.rolling(window).apply(rolling_rank)
def rolling_prod(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The product of the values in the array.
"""
return np.prod(na)
def product(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).apply(rolling_prod)
def ts_min(df, window=10):
"""
Wrapper function to estimate rolling min.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return df.rolling(window).min()
def ts_max(df, window=10):
"""
    Wrapper function to estimate rolling max.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series max over the past 'window' days.
"""
return df.rolling(window).max()
def delta(df, period=1):
"""
Wrapper function to estimate difference.
:param df: a pandas DataFrame.
:param period: the difference grade.
:return: a pandas DataFrame with today’s value minus the value 'period' days ago.
"""
return df.diff(period)
def delay(df, period=1):
"""
Wrapper function to estimate lag.
:param df: a pandas DataFrame.
:param period: the lag grade.
:return: a pandas DataFrame with lagged time series
"""
return df.shift(period)
def rank(df):
"""
Cross sectional rank
:param df: a pandas DataFrame.
:return: a pandas DataFrame with rank along columns.
"""
#return df.rank(axis=1, pct=True)
return df.rank(pct=True)
def scale(df, k=1):
"""
Scaling time serie.
:param df: a pandas DataFrame.
:param k: scaling factor.
:return: a pandas DataFrame rescaled df such that sum(abs(df)) = k
"""
return df.mul(k).div(np.abs(df).sum())
def ts_argmax(df, window=10):
"""
Wrapper function to estimate which day ts_max(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmax) + 1
def ts_argmin(df, window=10):
"""
Wrapper function to estimate which day ts_min(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmin) + 1
def decay_linear(df, period=10):
"""
Linear weighted moving average implementation.
:param df: a pandas DataFrame.
:param period: the LWMA period
:return: a pandas DataFrame with the LWMA.
"""
try:
df = df.to_frame() #Series is not supported for the calculations below.
except:
pass
# Clean data
if df.isnull().values.any():
df.fillna(method='ffill', inplace=True)
df.fillna(method='bfill', inplace=True)
df.fillna(value=0, inplace=True)
na_lwma = np.zeros_like(df)
na_lwma[:period, :] = df.iloc[:period, :]
na_series = df.values
divisor = period * (period + 1) / 2
y = (np.arange(period) + 1) * 1.0 / divisor
# Estimate the actual lwma with the actual close.
# The backtest engine should assure to be snooping bias free.
for row in range(period - 1, df.shape[0]):
x = na_series[row - period + 1: row + 1, :]
na_lwma[row, :] = (np.dot(x.T, y))
return pd.DataFrame(na_lwma, index=df.index, columns=['CLOSE'])
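# Usage sketch (illustration only): with period=3 the last three rows are weighted by
# 1/6, 2/6 and 3/6 (the most recent row gets the largest weight), and the result comes back
# as a one-column DataFrame named 'CLOSE' regardless of the input name.
def _example_decay_linear():
    close = pd.Series([10.0, 11.0, 12.0, 13.0], name="close") # hypothetical close prices
    return decay_linear(close, period=3)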
def highday(df, n): # number of periods between the current point and the maximum value over the previous n periods of df
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmax()
return result
def lowday(df, n): # number of periods between the current point and the minimum value over the previous n periods of df
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmin()
return result
def daily_panel_csv_initializer(csv_name): #not used now
if os.path.exists(csv_name)==False:
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY')
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')
dataset=0
for date in date_list["TRADE_DATE"]:
stock_list[date]=stock_list["INDUSTRY"]
stock_list.drop("INDUSTRY",axis=1,inplace=True)
stock_list.set_index("TS_CODE", inplace=True)
dataset = pd.DataFrame(stock_list.stack())
dataset.reset_index(inplace=True)
dataset.columns=["TS_CODE","TRADE_DATE","INDUSTRY"]
dataset.to_csv(csv_name,encoding='utf-8-sig',index=False)
else:
dataset=pd.read_csv(csv_name)
return dataset
def IndustryAverage_vwap():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_vwap.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average vwap data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average vwap data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average vwap data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged (per-stock) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = VWAP
result_unaveraged_piece.rename("VWAP_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VWAP_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_vwap.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_close():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_close.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average close data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average close data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average close data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculate the un-averaged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = CLOSE
result_unaveraged_piece.rename("CLOSE_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["CLOSE_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_close.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_low():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_low.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average low data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average low data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average low data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculate the un-averaged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
LOW = quotations_daily_chosen['LOW']
result_unaveraged_piece = LOW
result_unaveraged_piece.rename("LOW_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["LOW_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_low.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_volume():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_volume.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average volume data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average volume data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average volume data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculate the un-averaged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = VOLUME
result_unaveraged_piece.rename("VOLUME_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VOLUME_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_volume.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_adv(num):
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_adv{num}.csv".format(num=num))
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average adv{num} data needs not to be updated.".format(num=num))
return result_industryaveraged_df
else:
print("The corresponding industry average adv{num} data needs to be updated.".format(num=num))
first_date_update = date_list_update[0]
except:
print("The corresponding industry average adv{num} data is missing.".format(num=num))
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculate the un-averaged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = sma(VOLUME, num)
result_unaveraged_piece.rename("ADV{num}_UNAVERAGED".format(num=num),inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["ADV{num}_UNAVERAGED".format(num=num)].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_adv{num}.csv".format(num=num),encoding='utf-8-sig')
return result_industryaveraged_df
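#Usage note (illustrative): IndustryAverage_adv(20) builds or updates
#"IndustryAverage_Data_adv20.csv", i.e. the industry average of the num-day
#simple moving average of share volume; the window length num is the caller's choice.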
#(correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close
def IndustryAverage_PreparationForAlpha048():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha048.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha048 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha048 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha048 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculate the un-averaged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = (correlation(delta(CLOSE, 1), delta(delay(CLOSE, 1), 1), 250) *delta(CLOSE, 1)) / CLOSE
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA048_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA048_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha048.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(vwap * 0.728317) + (vwap *(1 - 0.728317))
def IndustryAverage_PreparationForAlpha059():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha059.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha059 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha059 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha059 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculate the un-averaged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = (VWAP * 0.728317) + (VWAP *(1 - 0.728317))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA059_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA059_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha059.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(close * 0.60733) + (open * (1 - 0.60733))
def IndustryAverage_PreparationForAlpha079():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha079.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha079 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha079 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha079 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculate the un-averaged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
OPEN = quotations_daily_chosen['OPEN']
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = (CLOSE * 0.60733) + (OPEN * (1 - 0.60733))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA079_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA079_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha079.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(open * 0.868128) + (high * (1 - 0.868128))
def IndustryAverage_PreparationForAlpha080():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha080.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha080 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha080 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha080 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculate the un-averaged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
OPEN = quotations_daily_chosen['OPEN']
HIGH = quotations_daily_chosen['HIGH']
result_unaveraged_piece = (OPEN * 0.868128) + (HIGH * (1 - 0.868128))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA080_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA080_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha080.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(low * 0.721001) + (vwap * (1 - 0.721001))
def IndustryAverage_PreparationForAlpha097():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha097.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha097 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha097 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha097 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculate the un-averaged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
LOW = quotations_daily_chosen['LOW']
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = (LOW * 0.721001) + (VWAP * (1 - 0.721001))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA097_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA097_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha097.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#rank(((((close - low) - (high -close)) / (high - low)) * volume))
def IndustryAverage_PreparationForAlpha100_1():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha100_1.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha100_1 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha100_1 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha100_1 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculate the un-averaged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
HIGH = quotations_daily_chosen['HIGH']
LOW = quotations_daily_chosen['LOW']
CLOSE = quotations_daily_chosen['CLOSE']
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = rank(((((CLOSE - LOW) - (HIGH -CLOSE)) / (HIGH - LOW)) * VOLUME))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA100_1_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA100_1_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha100_1.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(correlation(close, rank(adv20), 5) - rank(ts_argmin(close, 30)))
def IndustryAverage_PreparationForAlpha100_2():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha100_2.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha100_2 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha100_2 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha100_2 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculate the un-averaged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0] -30 #step back 30 rows to cover the 30-day lookback used below
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
VOLUME = quotations_daily_chosen['VOL']*100
adv20 = sma(VOLUME, 30) #note: a 30-day window is used here, although the name and the formula above say adv20
result_unaveraged_piece = (correlation(CLOSE, rank(adv20), 5) - rank(ts_argmin(CLOSE, 30)))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA100_2_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#average the per-stock data within the industry for each date
for date in date_list_update:
try: #skip dates with no data (e.g. the stock was suspended)
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA100_2_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha100_2.csv",encoding='utf-8-sig')
return result_industryaveraged_df
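#Illustrative use of the Alphas class defined below (assumes the database behind
#local_source holds daily quotations and indicators for the chosen TS_CODE):
#
# factors = Alphas(ts_code="000001.SZ", start_date=20210101, end_date=20211231)
# a1 = factors.alpha001() #pandas Series, one value per trade date in the range
# a48 = factors.alpha048() #industry-neutralized using the helper CSVs built above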
class Alphas(object):
def __init__(self, ts_code="000001.SZ",start_date=20210101,end_date=20211231):
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_indicators_daily_chosen=local_source.get_stock_indicators_daily(cols='TRADE_DATE,TS_CODE,TOTAL_SHARE',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_data_chosen=pd.merge(quotations_daily_chosen,stock_indicators_daily_chosen,on=['TRADE_DATE','TS_CODE'],how="left")
stock_data_chosen["TOTAL_MV"]=stock_data_chosen["TOTAL_SHARE"]*stock_data_chosen["CLOSE"]
stock_data_chosen=stock_data_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
stock_data_chosen["TRADE_DATE"]=stock_data_chosen["TRADE_DATE"].astype(int)
self.open = stock_data_chosen['OPEN']
self.high = stock_data_chosen['HIGH']
self.low = stock_data_chosen['LOW']
self.close = stock_data_chosen['CLOSE']
self.volume = stock_data_chosen['VOL']*100
self.returns = stock_data_chosen['CHANGE'] / stock_data_chosen['OPEN']
self.vwap = (stock_data_chosen['AMOUNT']*1000)/(stock_data_chosen['VOL']*100+1) #approximate VWAP; the +1 avoids division by zero
self.cap = stock_data_chosen['TOTAL_MV']
self.industry = local_source.get_stock_list(cols='TS_CODE,INDUSTRY', condition='TS_CODE = '+'"'+ts_code+'"')['INDUSTRY'].iloc[0]
self.available_dates = stock_data_chosen["TRADE_DATE"]
output_dates = stock_data_chosen[(stock_data_chosen["TRADE_DATE"]>=start_date)*(stock_data_chosen["TRADE_DATE"]<=end_date)]["TRADE_DATE"]
start_available_date = output_dates.iloc[0]
end_available_date = output_dates.iloc[-1]
self.start_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == start_available_date].index[0]
self.end_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == end_available_date].index[0] +1
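#Each alphaXXX method below computes its expression over the full price history
#and then returns alpha[self.start_date_index:self.end_date_index], i.e. only the
#rows between start_date and end_date; the earlier rows are kept so that rolling
#windows (e.g. 250-day sums) have enough lookback.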
# Alpha#1 (rank(Ts_ArgMax(SignedPower(((returns < 0) ? stddev(returns, 20) : close), 2.), 5)) -0.5)
def alpha001(self):
inner = self.close
inner[self.returns < 0] = stddev(self.returns, 20)
alpha = rank(ts_argmax(inner ** 2, 5))
return alpha[self.start_date_index:self.end_date_index]
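#Note on the coding pattern used throughout: the conditional expressions
#"(cond ? a : b)" in the original formulas are implemented by first building one
#branch as a Series and then overwriting the entries where the other condition
#holds, as in alpha001 above (inner[self.returns < 0] = stddev(self.returns, 20)).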
# Alpha#2 (-1 * correlation(rank(delta(log(volume), 2)), rank(((close - open) / open)), 6))
def alpha002(self):
df = -1 * correlation(rank(delta(log(self.volume), 2)), rank((self.close - self.open) / self.open), 6)
alpha = df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#3 (-1 * correlation(rank(open), rank(volume), 10))
def alpha003(self):
df = -1 * correlation(rank(self.open), rank(self.volume), 10)
alpha = df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#4 (-1 * Ts_Rank(rank(low), 9))
def alpha004(self):
alpha = -1 * ts_rank(rank(self.low), 9)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#5 (rank((open - (sum(vwap, 10) / 10))) * (-1 * abs(rank((close - vwap)))))
def alpha005(self):
alpha = (rank((self.open - (sum(self.vwap, 10) / 10))) * (-1 * abs(rank((self.close - self.vwap)))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#6 (-1 * correlation(open, volume, 10))
def alpha006(self):
df = -1 * correlation(self.open, self.volume, 10)
alpha = df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#7 ((adv20 < volume) ? ((-1 * ts_rank(abs(delta(close, 7)), 60)) * sign(delta(close, 7))) : (-1* 1))
def alpha007(self):
adv20 = sma(self.volume, 20)
alpha = -1 * ts_rank(abs(delta(self.close, 7)), 60) * sign(delta(self.close, 7))
alpha[adv20 >= self.volume] = -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#8 (-1 * rank(((sum(open, 5) * sum(returns, 5)) - delay((sum(open, 5) * sum(returns, 5)),10))))
def alpha008(self):
alpha = -1 * (rank(((ts_sum(self.open, 5) * ts_sum(self.returns, 5)) - delay((ts_sum(self.open, 5) * ts_sum(self.returns, 5)), 10))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#9 ((0 < ts_min(delta(close, 1), 5)) ? delta(close, 1) : ((ts_max(delta(close, 1), 5) < 0) ?delta(close, 1) : (-1 * delta(close, 1))))
def alpha009(self):
delta_close = delta(self.close, 1)
cond_1 = ts_min(delta_close, 5) > 0
cond_2 = ts_max(delta_close, 5) < 0
alpha = -1 * delta_close
alpha[cond_1 | cond_2] = delta_close
return alpha[self.start_date_index:self.end_date_index]
# Alpha#10 rank(((0 < ts_min(delta(close, 1), 4)) ? delta(close, 1) : ((ts_max(delta(close, 1), 4) < 0)? delta(close, 1) : (-1 * delta(close, 1)))))
def alpha010(self):
delta_close = delta(self.close, 1)
cond_1 = ts_min(delta_close, 4) > 0
cond_2 = ts_max(delta_close, 4) < 0
alpha = -1 * delta_close
alpha[cond_1 | cond_2] = delta_close
return alpha[self.start_date_index:self.end_date_index]
# Alpha#11 ((rank(ts_max((vwap - close), 3)) + rank(ts_min((vwap - close), 3))) *rank(delta(volume, 3)))
def alpha011(self):
alpha = ((rank(ts_max((self.vwap - self.close), 3)) + rank(ts_min((self.vwap - self.close), 3))) *rank(delta(self.volume, 3)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#12 (sign(delta(volume, 1)) * (-1 * delta(close, 1)))
def alpha012(self):
alpha = sign(delta(self.volume, 1)) * (-1 * delta(self.close, 1))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#13 (-1 * rank(covariance(rank(close), rank(volume), 5)))
def alpha013(self):
alpha = -1 * rank(covariance(rank(self.close), rank(self.volume), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#14 ((-1 * rank(delta(returns, 3))) * correlation(open, volume, 10))
def alpha014(self):
df = correlation(self.open, self.volume, 10)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * rank(delta(self.returns, 3)) * df
return alpha[self.start_date_index:self.end_date_index]
# Alpha#15 (-1 * sum(rank(correlation(rank(high), rank(volume), 3)), 3))
def alpha015(self):
df = correlation(rank(self.high), rank(self.volume), 3)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * ts_sum(rank(df), 3)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#16 (-1 * rank(covariance(rank(high), rank(volume), 5)))
def alpha016(self):
alpha = -1 * rank(covariance(rank(self.high), rank(self.volume), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#17 (((-1 * rank(ts_rank(close, 10))) * rank(delta(delta(close, 1), 1))) *rank(ts_rank((volume / adv20), 5)))
def alpha017(self):
adv20 = sma(self.volume, 20)
alpha = -1 * (rank(ts_rank(self.close, 10)) * rank(delta(delta(self.close, 1), 1)) * rank(ts_rank((self.volume / adv20), 5)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#18 (-1 * rank(((stddev(abs((close - open)), 5) + (close - open)) + correlation(close, open,10))))
def alpha018(self):
df = correlation(self.close, self.open, 10)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * (rank((stddev(abs((self.close - self.open)), 5) + (self.close - self.open)) + df))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#19 ((-1 * sign(((close - delay(close, 7)) + delta(close, 7)))) * (1 + rank((1 + sum(returns,250)))))
def alpha019(self):
alpha = ((-1 * sign((self.close - delay(self.close, 7)) + delta(self.close, 7))) * (1 + rank(1 + ts_sum(self.returns, 250))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#20 (((-1 * rank((open - delay(high, 1)))) * rank((open - delay(close, 1)))) * rank((open -delay(low, 1))))
def alpha020(self):
alpha = -1 * (rank(self.open - delay(self.high, 1)) * rank(self.open - delay(self.close, 1)) * rank(self.open - delay(self.low, 1)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#21 ((((sum(close, 8) / 8) + stddev(close, 8)) < (sum(close, 2) / 2)) ? (-1 * 1) : (((sum(close,2) / 2) < ((sum(close, 8) / 8) - stddev(close, 8))) ? 1 : (((1 < (volume / adv20)) || ((volume /adv20) == 1)) ? 1 : (-1 * 1))))
def alpha021(self):
cond_1 = sma(self.close, 8) + stddev(self.close, 8) < sma(self.close, 2)
cond_2 = sma(self.volume, 20) / self.volume < 1
alpha = pd.DataFrame(np.ones_like(self.close), index=self.close.index)
#alpha = pd.DataFrame(np.ones_like(self.close), index=self.close.index, columns=self.close.columns)
alpha[cond_1 | cond_2] = -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#22 (-1 * (delta(correlation(high, volume, 5), 5) * rank(stddev(close, 20))))
def alpha022(self):
df = correlation(self.high, self.volume, 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * delta(df, 5) * rank(stddev(self.close, 20))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#23 (((sum(high, 20) / 20) < high) ? (-1 * delta(high, 2)) : 0)
def alpha023(self):
cond = sma(self.high, 20) < self.high
alpha = pd.DataFrame(np.zeros_like(self.close),index=self.close.index,columns=['close'])
alpha.loc[cond,'close'] = -1 * delta(self.high, 2).fillna(value=0) #.loc accepts the boolean mask; .at only takes scalar labels
return alpha[self.start_date_index:self.end_date_index]
# Alpha#24 ((((delta((sum(close, 100) / 100), 100) / delay(close, 100)) < 0.05) ||((delta((sum(close, 100) / 100), 100) / delay(close, 100)) == 0.05)) ? (-1 * (close - ts_min(close,100))) : (-1 * delta(close, 3)))
def alpha024(self):
cond = delta(sma(self.close, 100), 100) / delay(self.close, 100) <= 0.05
alpha = -1 * delta(self.close, 3)
alpha[cond] = -1 * (self.close - ts_min(self.close, 100))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#25 rank(((((-1 * returns) * adv20) * vwap) * (high - close)))
def alpha025(self):
adv20 = sma(self.volume, 20)
alpha = rank(((((-1 * self.returns) * adv20) * self.vwap) * (self.high - self.close)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#26 (-1 * ts_max(correlation(ts_rank(volume, 5), ts_rank(high, 5), 5), 3))
def alpha026(self):
df = correlation(ts_rank(self.volume, 5), ts_rank(self.high, 5), 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * ts_max(df, 3)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#27 ((0.5 < rank((sum(correlation(rank(volume), rank(vwap), 6), 2) / 2.0))) ? (-1 * 1) : 1)
def alpha027(self): #there may be problems with this implementation
alpha = rank((sma(correlation(rank(self.volume), rank(self.vwap), 6), 2) / 2.0))
alpha[alpha > 0.5] = -1
alpha[alpha <= 0.5]=1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#28 scale(((correlation(adv20, low, 5) + ((high + low) / 2)) - close))
def alpha028(self):
adv20 = sma(self.volume, 20)
df = correlation(adv20, self.low, 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = scale(((df + ((self.high + self.low) / 2)) - self.close))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#29 (min(product(rank(rank(scale(log(sum(ts_min(rank(rank((-1 * rank(delta((close - 1),5))))), 2), 1))))), 1), 5) + ts_rank(delay((-1 * returns), 6), 5))
def alpha029(self):
alpha = (ts_min(rank(rank(scale(log(ts_sum(rank(rank(-1 * rank(delta((self.close - 1), 5)))), 2))))), 5) + ts_rank(delay((-1 * self.returns), 6), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#30 (((1.0 - rank(((sign((close - delay(close, 1))) + sign((delay(close, 1) - delay(close, 2)))) +sign((delay(close, 2) - delay(close, 3)))))) * sum(volume, 5)) / sum(volume, 20))
def alpha030(self):
delta_close = delta(self.close, 1)
inner = sign(delta_close) + sign(delay(delta_close, 1)) + sign(delay(delta_close, 2))
alpha = ((1.0 - rank(inner)) * ts_sum(self.volume, 5)) / ts_sum(self.volume, 20)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#31 ((rank(rank(rank(decay_linear((-1 * rank(rank(delta(close, 10)))), 10)))) + rank((-1 *delta(close, 3)))) + sign(scale(correlation(adv20, low, 12))))
def alpha031(self):
adv20 = sma(self.volume, 20)
df = correlation(adv20, self.low, 12).replace([-np.inf, np.inf], 0).fillna(value=0)
p1=rank(rank(rank(decay_linear((-1 * rank(rank(delta(self.close, 10)))), 10))))
p2=rank((-1 * delta(self.close, 3)))
p3=sign(scale(df))
alpha = p1.CLOSE+p2+p3
return alpha[self.start_date_index:self.end_date_index]
# Alpha#32 (scale(((sum(close, 7) / 7) - close)) + (20 * scale(correlation(vwap, delay(close, 5),230))))
def alpha032(self):
alpha = scale(((sma(self.close, 7) / 7) - self.close)) + (20 * scale(correlation(self.vwap, delay(self.close, 5),230)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#33 rank((-1 * ((1 - (open / close))^1)))
def alpha033(self):
alpha = rank(-1 + (self.open / self.close))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#34 rank(((1 - rank((stddev(returns, 2) / stddev(returns, 5)))) + (1 - rank(delta(close, 1)))))
def alpha034(self):
inner = stddev(self.returns, 2) / stddev(self.returns, 5)
inner = inner.replace([-np.inf, np.inf], 1).fillna(value=1)
alpha = rank(2 - rank(inner) - rank(delta(self.close, 1)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#35 ((Ts_Rank(volume, 32) * (1 - Ts_Rank(((close + high) - low), 16))) * (1 -Ts_Rank(returns, 32)))
def alpha035(self):
alpha = ((ts_rank(self.volume, 32) * (1 - ts_rank(self.close + self.high - self.low, 16))) * (1 - ts_rank(self.returns, 32)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#36 (((((2.21 * rank(correlation((close - open), delay(volume, 1), 15))) + (0.7 * rank((open- close)))) + (0.73 * rank(Ts_Rank(delay((-1 * returns), 6), 5)))) + rank(abs(correlation(vwap,adv20, 6)))) + (0.6 * rank((((sum(close, 200) / 200) - open) * (close - open)))))
def alpha036(self):
adv20 = sma(self.volume, 20)
alpha = (((((2.21 * rank(correlation((self.close - self.open), delay(self.volume, 1), 15))) + (0.7 * rank((self.open- self.close)))) + (0.73 * rank(ts_rank(delay((-1 * self.returns), 6), 5)))) + rank(abs(correlation(self.vwap,adv20, 6)))) + (0.6 * rank((((sma(self.close, 200) / 200) - self.open) * (self.close - self.open)))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#37 (rank(correlation(delay((open - close), 1), close, 200)) + rank((open - close)))
def alpha037(self):
alpha = rank(correlation(delay(self.open - self.close, 1), self.close, 200)) + rank(self.open - self.close)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#38 ((-1 * rank(Ts_Rank(close, 10))) * rank((close / open)))
def alpha038(self):
inner = self.close / self.open
inner = inner.replace([-np.inf, np.inf], 1).fillna(value=1)
alpha = -1 * rank(ts_rank(self.close, 10)) * rank(inner) #close, per the Alpha#38 formula above
return alpha[self.start_date_index:self.end_date_index]
# Alpha#39 ((-1 * rank((delta(close, 7) * (1 - rank(decay_linear((volume / adv20), 9)))))) * (1 +rank(sum(returns, 250))))
def alpha039(self):
adv20 = sma(self.volume, 20)
alpha = ((-1 * rank(delta(self.close, 7) * (1 - rank(decay_linear((self.volume / adv20), 9).CLOSE)))) * (1 + rank(sma(self.returns, 250))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#40 ((-1 * rank(stddev(high, 10))) * correlation(high, volume, 10))
def alpha040(self):
alpha = -1 * rank(stddev(self.high, 10)) * correlation(self.high, self.volume, 10)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#41 (((high * low)^0.5) - vwap)
def alpha041(self):
alpha = pow((self.high * self.low),0.5) - self.vwap
return alpha[self.start_date_index:self.end_date_index]
# Alpha#42 (rank((vwap - close)) / rank((vwap + close)))
def alpha042(self):
alpha = rank((self.vwap - self.close)) / rank((self.vwap + self.close))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#43 (ts_rank((volume / adv20), 20) * ts_rank((-1 * delta(close, 7)), 8))
def alpha043(self):
adv20 = sma(self.volume, 20)
alpha = ts_rank(self.volume / adv20, 20) * ts_rank((-1 * delta(self.close, 7)), 8)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#44 (-1 * correlation(high, rank(volume), 5))
def alpha044(self):
df = correlation(self.high, rank(self.volume), 5)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * df
return alpha[self.start_date_index:self.end_date_index]
# Alpha#45 (-1 * ((rank((sum(delay(close, 5), 20) / 20)) * correlation(close, volume, 2)) *rank(correlation(sum(close, 5), sum(close, 20), 2))))
def alpha045(self):
df = correlation(self.close, self.volume, 2)
df = df.replace([-np.inf, np.inf], 0).fillna(value=0)
alpha = -1 * (rank(sma(delay(self.close, 5), 20)) * df * rank(correlation(ts_sum(self.close, 5), ts_sum(self.close, 20), 2)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#46 ((0.25 < (((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10))) ?(-1 * 1) : (((((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10)) < 0) ? 1 :((-1 * 1) * (close - delay(close, 1)))))
def alpha046(self):
inner = ((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10)
alpha = (-1 * delta(self.close))
alpha[inner < 0] = 1
alpha[inner > 0.25] = -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#47 ((((rank((1 / close)) * volume) / adv20) * ((high * rank((high - close))) / (sum(high, 5) /5))) - rank((vwap - delay(vwap, 5))))
def alpha047(self):
adv20 = sma(self.volume, 20)
alpha = ((((rank((1 / self.close)) * self.volume) / adv20) * ((self.high * rank((self.high - self.close))) / sma(self.high, 5))) - rank((self.vwap - delay(self.vwap, 5))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#48 (indneutralize(((correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close), IndClass.subindustry) / sum(((delta(close, 1) / delay(close, 1))^2), 250))
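# Implementation note: IndNeutralize(x, IndClass.*) is approximated here and below by
# subtracting the matching industry-average series (the IndustryAverage_* helpers),
# aligned on this stock's available trading dates, from the stock-level series.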
def alpha048(self):
indaverage_data = IndustryAverage_PreparationForAlpha048()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (correlation(delta(self.close, 1), delta(delay(self.close, 1), 1), 250) *delta(self.close, 1)) / self.close
indneutralized_data = unindneutralized_data - indaverage_data
alpha = indneutralized_data / sma(((delta(self.close, 1) / delay(self.close, 1))**2), 250)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#49 (((((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10)) < (-1 *0.1)) ? 1 : ((-1 * 1) * (close - delay(close, 1))))
def alpha049(self):
inner = (((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10))
alpha = (-1 * delta(self.close))
alpha[inner < -0.1] = 1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#50 (-1 * ts_max(rank(correlation(rank(volume), rank(vwap), 5)), 5))
def alpha050(self):
alpha = (-1 * ts_max(rank(correlation(rank(self.volume), rank(self.vwap), 5)), 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#51 (((((delay(close, 20) - delay(close, 10)) / 10) - ((delay(close, 10) - close) / 10)) < (-1 *0.05)) ? 1 : ((-1 * 1) * (close - delay(close, 1))))
def alpha051(self):
inner = (((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10))
alpha = (-1 * delta(self.close))
alpha[inner < -0.05] = 1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#52 ((((-1 * ts_min(low, 5)) + delay(ts_min(low, 5), 5)) * rank(((sum(returns, 240) -sum(returns, 20)) / 220))) * ts_rank(volume, 5))
def alpha052(self):
alpha = (((-1 * delta(ts_min(self.low, 5), 5)) * rank(((ts_sum(self.returns, 240) - ts_sum(self.returns, 20)) / 220))) * ts_rank(self.volume, 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#53 (-1 * delta((((close - low) - (high - close)) / (close - low)), 9))
def alpha053(self):
inner = (self.close - self.low).replace(0, 0.0001)
alpha = -1 * delta((((self.close - self.low) - (self.high - self.close)) / inner), 9)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#54 ((-1 * ((low - close) * (open^5))) / ((low - high) * (close^5)))
def alpha054(self):
inner = (self.low - self.high).replace(0, -0.0001)
alpha = -1 * (self.low - self.close) * (self.open ** 5) / (inner * (self.close ** 5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#55 (-1 * correlation(rank(((close - ts_min(low, 12)) / (ts_max(high, 12) - ts_min(low,12)))), rank(volume), 6))
def alpha055(self):
divisor = (ts_max(self.high, 12) - ts_min(self.low, 12)).replace(0, 0.0001)
inner = (self.close - ts_min(self.low, 12)) / (divisor)
df = correlation(rank(inner), rank(self.volume), 6)
alpha = -1 * df.replace([-np.inf, np.inf], 0).fillna(value=0)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#56 (0 - (1 * (rank((sum(returns, 10) / sum(sum(returns, 2), 3))) * rank((returns * cap)))))
def alpha056(self):
alpha = (0 - (1 * (rank((sma(self.returns, 10) / sma(sma(self.returns, 2), 3))) * rank((self.returns * self.cap)))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#57 (0 - (1 * ((close - vwap) / decay_linear(rank(ts_argmax(close, 30)), 2))))
def alpha057(self):
alpha = (0 - (1 * ((self.close - self.vwap) / decay_linear(rank(ts_argmax(self.close, 30)), 2).CLOSE)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#58 (-1 * Ts_Rank(decay_linear(correlation(IndNeutralize(vwap, IndClass.sector), volume,3.92795), 7.89291), 5.50322))
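# Implementation note: the fractional lookbacks quoted in the formula (3.92795, 7.89291,
# 5.50322) are rounded to the integer windows 4, 8 and 6 below; the same rounding
# convention is applied to the remaining alphas with fractional parameters.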
def alpha058(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
alpha = (-1 * ts_rank(decay_linear(correlation(indneutralized_vwap, self.volume, 4), 8), 6))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#59 (-1 * Ts_Rank(decay_linear(correlation(IndNeutralize(((vwap * 0.728317) + (vwap *(1 - 0.728317))), IndClass.industry), volume, 4.25197), 16.2289), 8.19648))
def alpha059(self):
indaverage_data = IndustryAverage_PreparationForAlpha059()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.vwap * 0.728317) + (self.vwap *(1 - 0.728317))
indneutralized_data = unindneutralized_data - indaverage_data
alpha = (-1 * ts_rank(decay_linear(correlation(indneutralized_data, self.volume, 4), 16), 8))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#60 (0 - (1 * ((2 * scale(rank(((((close - low) - (high - close)) / (high - low)) * volume)))) -scale(rank(ts_argmax(close, 10))))))
def alpha060(self):
divisor = (self.high - self.low).replace(0, 0.0001)
inner = ((self.close - self.low) - (self.high - self.close)) * self.volume / divisor
alpha = - ((2 * scale(rank(inner))) - scale(rank(ts_argmax(self.close, 10))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#61 (rank((vwap - ts_min(vwap, 16.1219))) < rank(correlation(vwap, adv180, 17.9282)))
def alpha061(self):
adv180 = sma(self.volume, 180)
alpha = (rank((self.vwap - ts_min(self.vwap, 16))) < rank(correlation(self.vwap, adv180, 18)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#62 ((rank(correlation(vwap, sum(adv20, 22.4101), 9.91009)) < rank(((rank(open) +rank(open)) < (rank(((high + low) / 2)) + rank(high))))) * -1)
def alpha062(self):
adv20 = sma(self.volume, 20)
alpha = ((rank(correlation(self.vwap, sma(adv20, 22), 10)) < rank(((rank(self.open) +rank(self.open)) < (rank(((self.high + self.low) / 2)) + rank(self.high))))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#63 ((rank(decay_linear(delta(IndNeutralize(close, IndClass.industry), 2.25164), 8.22237))- rank(decay_linear(correlation(((vwap * 0.318108) + (open * (1 - 0.318108))), sum(adv180,37.2467), 13.557), 12.2883))) * -1)
def alpha063(self):
indaverage_close = IndustryAverage_close()
indaverage_close = indaverage_close[indaverage_close.index.isin(self.available_dates)]
indaverage_close = indaverage_close[self.industry]
indaverage_close = indaverage_close.reset_index(drop=True)
indneutralized_close = self.close - indaverage_close
adv180 = sma(self.volume, 180)
alpha = ((rank(decay_linear(delta(indneutralized_close, 2), 8))- rank(decay_linear(correlation(((self.vwap * 0.318108) + (self.open * (1 - 0.318108))), sma(adv180, 38), 14), 12))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#64 ((rank(correlation(sum(((open * 0.178404) + (low * (1 - 0.178404))), 12.7054),sum(adv120, 12.7054), 16.6208)) < rank(delta(((((high + low) / 2) * 0.178404) + (vwap * (1 -0.178404))), 3.69741))) * -1)
def alpha064(self):
adv120 = sma(self.volume, 120)
alpha = ((rank(correlation(sma(((self.open * 0.178404) + (self.low * (1 - 0.178404))), 13), sma(adv120, 13), 17)) < rank(delta(((((self.high + self.low) / 2) * 0.178404) + (self.vwap * (1 - 0.178404))), 4))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#65 ((rank(correlation(((open * 0.00817205) + (vwap * (1 - 0.00817205))), sum(adv60,8.6911), 6.40374)) < rank((open - ts_min(open, 13.635)))) * -1)
def alpha065(self):
adv60 = sma(self.volume, 60)
alpha = ((rank(correlation(((self.open * 0.00817205) + (self.vwap * (1 - 0.00817205))), sma(adv60,9), 6)) < rank((self.open - ts_min(self.open, 14)))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#66 ((rank(decay_linear(delta(vwap, 3.51013), 7.23052)) + Ts_Rank(decay_linear(((((low* 0.96633) + (low * (1 - 0.96633))) - vwap) / (open - ((high + low) / 2))), 11.4157), 6.72611)) * -1)
def alpha066(self):
alpha = ((rank(decay_linear(delta(self.vwap, 4), 7).CLOSE) + ts_rank(decay_linear(((((self.low* 0.96633) + (self.low * (1 - 0.96633))) - self.vwap) / (self.open - ((self.high + self.low) / 2))), 11).CLOSE, 7)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#67 ((rank((high - ts_min(high, 2.14593)))^rank(correlation(IndNeutralize(vwap,IndClass.sector), IndNeutralize(adv20, IndClass.subindustry), 6.02936))) * -1)
def alpha067(self):
indaverage_adv20 = IndustryAverage_adv(20)
indaverage_adv20 = indaverage_adv20[indaverage_adv20.index.isin(self.available_dates)]
indaverage_adv20 = indaverage_adv20[self.industry]
indaverage_adv20 = indaverage_adv20.reset_index(drop=True)
adv20 = sma(self.volume, 20)
indneutralized_adv20 = adv20 - indaverage_adv20
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
alpha = rank((self.high - ts_min(self.high, 2))) ** rank(correlation(indneutralized_vwap, indneutralized_adv20, 6)) * -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#68 ((Ts_Rank(correlation(rank(high), rank(adv15), 8.91644), 13.9333) <rank(delta(((close * 0.518371) + (low * (1 - 0.518371))), 1.06157))) * -1)
def alpha068(self):
adv15 = sma(self.volume, 15)
alpha = ((ts_rank(correlation(rank(self.high), rank(adv15), 9), 14) < rank(delta(((self.close * 0.518371) + (self.low * (1 - 0.518371))), 1))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#69 ((rank(ts_max(delta(IndNeutralize(vwap, IndClass.industry), 2.72412),4.79344))^Ts_Rank(correlation(((close * 0.490655) + (vwap * (1 - 0.490655))), adv20, 4.92416),9.0615)) * -1)
def alpha069(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
adv20 = sma(self.volume, 20)
alpha = ((rank(ts_max(delta(indneutralized_vwap, 3),5)) ** ts_rank(correlation(((self.close * 0.490655) + (self.vwap * (1 - 0.490655))), adv20, 5),9)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#70 ((rank(delta(vwap, 1.29456))^Ts_Rank(correlation(IndNeutralize(close,IndClass.industry), adv50, 17.8256), 17.9171)) * -1)
def alpha070(self):
indaverage_close = IndustryAverage_close()
indaverage_close = indaverage_close[indaverage_close.index.isin(self.available_dates)]
indaverage_close = indaverage_close[self.industry]
indaverage_close = indaverage_close.reset_index(drop=True)
indneutralized_close = self.close - indaverage_close
adv50 = sma(self.volume, 50)
alpha = (rank(delta(self.vwap, 1)) ** ts_rank(correlation(indneutralized_close, adv50, 18), 18)) * -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#71 max(Ts_Rank(decay_linear(correlation(Ts_Rank(close, 3.43976), Ts_Rank(adv180,12.0647), 18.0175), 4.20501), 15.6948), Ts_Rank(decay_linear((rank(((low + open) - (vwap +vwap)))^2), 16.4662), 4.4388))
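# Implementation note: the elementwise max of the two legs is built by collecting p1 and
# p2 into a helper DataFrame and filling a 'max' column via boolean-indexed assignment;
# the later min/max alphas reuse the same pattern.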
def alpha071(self):
adv180 = sma(self.volume, 180)
p1=ts_rank(decay_linear(correlation(ts_rank(self.close, 3), ts_rank(adv180,12), 18), 4).CLOSE, 16)
p2=ts_rank(decay_linear((rank(((self.low + self.open) - (self.vwap +self.vwap))).pow(2)), 16).CLOSE, 4)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'max']=df['p1']
df.at[df['p2']>=df['p1'],'max']=df['p2']
alpha = df['max']
#alpha = max(ts_rank(decay_linear(correlation(ts_rank(self.close, 3), ts_rank(adv180,12), 18).to_frame(), 4).CLOSE, 16), ts_rank(decay_linear((rank(((self.low + self.open) - (self.vwap +self.vwap))).pow(2)).to_frame(), 16).CLOSE, 4))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#72 (rank(decay_linear(correlation(((high + low) / 2), adv40, 8.93345), 10.1519)) /rank(decay_linear(correlation(Ts_Rank(vwap, 3.72469), Ts_Rank(volume, 18.5188), 6.86671),2.95011)))
def alpha072(self):
adv40 = sma(self.volume, 40)
alpha = (rank(decay_linear(correlation(((self.high + self.low) / 2), adv40, 9).to_frame(), 10).CLOSE) /rank(decay_linear(correlation(ts_rank(self.vwap, 4), ts_rank(self.volume, 19), 7).to_frame(),3).CLOSE))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#73 (max(rank(decay_linear(delta(vwap, 4.72775), 2.91864)),Ts_Rank(decay_linear(((delta(((open * 0.147155) + (low * (1 - 0.147155))), 2.03608) / ((open *0.147155) + (low * (1 - 0.147155)))) * -1), 3.33829), 16.7411)) * -1)
def alpha073(self):
p1=rank(decay_linear(delta(self.vwap, 5).to_frame(), 3).CLOSE)
p2=ts_rank(decay_linear(((delta(((self.open * 0.147155) + (self.low * (1 - 0.147155))), 2) / ((self.open *0.147155) + (self.low * (1 - 0.147155)))) * -1).to_frame(), 3).CLOSE, 17)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'max']=df['p1']
df.at[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1 * df['max']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#74 ((rank(correlation(close, sum(adv30, 37.4843), 15.1365)) <rank(correlation(rank(((high * 0.0261661) + (vwap * (1 - 0.0261661)))), rank(volume), 11.4791)))* -1)
def alpha074(self):
adv30 = sma(self.volume, 30)
alpha = ((rank(correlation(self.close, sma(adv30, 37), 15)) <rank(correlation(rank(((self.high * 0.0261661) + (self.vwap * (1 - 0.0261661)))), rank(self.volume), 11)))* -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#75 (rank(correlation(vwap, volume, 4.24304)) < rank(correlation(rank(low), rank(adv50),12.4413)))
def alpha075(self):
adv50 = sma(self.volume, 50)
alpha = (rank(correlation(self.vwap, self.volume, 4)) < rank(correlation(rank(self.low), rank(adv50),12)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#76 (max(rank(decay_linear(delta(vwap, 1.24383), 11.8259)),Ts_Rank(decay_linear(Ts_Rank(correlation(IndNeutralize(low, IndClass.sector), adv81,8.14941), 19.569), 17.1543), 19.383)) * -1)
def alpha076(self):
indaverage_low = IndustryAverage_low()
indaverage_low = indaverage_low[indaverage_low.index.isin(self.available_dates)]
indaverage_low = indaverage_low[self.industry]
indaverage_low = indaverage_low.reset_index(drop=True)
indneutralized_low = self.low - indaverage_low
adv81 = sma(self.volume, 81)
p1 = rank(decay_linear(delta(self.vwap.to_frame(), 1), 12))
p2 = ts_rank(decay_linear(ts_rank(correlation(indneutralized_low, adv81, 8).to_frame(), 20), 17), 19)
p1=p1.iloc[:,0]
p2=p2.iloc[:,0]
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'max']=df['p1']
df.at[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1 * df['max']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#77 min(rank(decay_linear(((((high + low) / 2) + high) - (vwap + high)), 20.0451)),rank(decay_linear(correlation(((high + low) / 2), adv40, 3.1614), 5.64125)))
def alpha077(self):
adv40 = sma(self.volume, 40)
p1=rank(decay_linear(((((self.high + self.low) / 2) + self.high) - (self.vwap + self.high)).to_frame(), 20).CLOSE)
p2=rank(decay_linear(correlation(((self.high + self.low) / 2), adv40, 3).to_frame(), 6).CLOSE)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'min']=df['p2']
df.at[df['p2']>=df['p1'],'min']=df['p1']
alpha = df['min']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#78 (rank(correlation(sum(((low * 0.352233) + (vwap * (1 - 0.352233))), 19.7428),sum(adv40, 19.7428), 6.83313))^rank(correlation(rank(vwap), rank(volume), 5.77492)))
def alpha078(self):
adv40 = sma(self.volume, 40)
alpha = (rank(correlation(ts_sum(((self.low * 0.352233) + (self.vwap * (1 - 0.352233))), 20),ts_sum(adv40,20), 7)).pow(rank(correlation(rank(self.vwap), rank(self.volume), 6))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#79 (rank(delta(IndNeutralize(((close * 0.60733) + (open * (1 - 0.60733))),IndClass.sector), 1.23438)) < rank(correlation(Ts_Rank(vwap, 3.60973), Ts_Rank(adv150,9.18637), 14.6644)))
def alpha079(self):
indaverage_data = IndustryAverage_PreparationForAlpha079()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.close * 0.60733) + (self.open * (1 - 0.60733))
indneutralized_data = unindneutralized_data - indaverage_data
adv150 = sma(self.volume, 150)
alpha = (rank(delta(indneutralized_data, 1)) < rank(correlation(ts_rank(self.vwap, 4), ts_rank(adv150, 9), 15))) *1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#80 ((rank(Sign(delta(IndNeutralize(((open * 0.868128) + (high * (1 - 0.868128))),IndClass.industry), 4.04545)))^Ts_Rank(correlation(high, adv10, 5.11456), 5.53756)) * -1)
def alpha080(self):
indaverage_data = IndustryAverage_PreparationForAlpha080()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.open * 0.868128) + (self.high * (1 - 0.868128))
indneutralized_data = unindneutralized_data - indaverage_data
adv10 = sma(self.volume, 10)
alpha = rank(sign(delta(indneutralized_data, 4))) ** (ts_rank(correlation(self.high, adv10, 5), 6)) * -1
return alpha[self.start_date_index:self.end_date_index]
# Alpha#81 ((rank(Log(product(rank((rank(correlation(vwap, sum(adv10, 49.6054),8.47743))^4)), 14.9655))) < rank(correlation(rank(vwap), rank(volume), 5.07914))) * -1)
def alpha081(self):
adv10 = sma(self.volume, 10)
alpha = ((rank(log(product(rank((rank(correlation(self.vwap, ts_sum(adv10, 50),8)).pow(4))), 15))) < rank(correlation(rank(self.vwap), rank(self.volume), 5))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#82 (min(rank(decay_linear(delta(open, 1.46063), 14.8717)),Ts_Rank(decay_linear(correlation(IndNeutralize(volume, IndClass.sector), ((open * 0.634196) +(open * (1 - 0.634196))), 17.4842), 6.92131), 13.4283)) * -1)
def alpha082(self):
indaverage_volume = IndustryAverage_volume()
indaverage_volume = indaverage_volume[indaverage_volume.index.isin(self.available_dates)]
indaverage_volume = indaverage_volume[self.industry]
indaverage_volume = indaverage_volume.reset_index(drop=True)
indneutralized_volume = self.volume - indaverage_volume
p1 = rank(decay_linear(delta(self.open, 1).to_frame(), 15))
p2 = ts_rank(decay_linear(correlation(indneutralized_volume, ((self.open * 0.634196)+(self.open * (1 - 0.634196))), 17).to_frame(), 7), 13)
p1=p1.iloc[:,0]
p2=p2.iloc[:,0]
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'min']=df['p2']
df.at[df['p2']>=df['p1'],'min']=df['p1']
alpha = -1 * df['min']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#83 ((rank(delay(((high - low) / (sum(close, 5) / 5)), 2)) * rank(rank(volume))) / (((high -low) / (sum(close, 5) / 5)) / (vwap - close)))
def alpha083(self):
alpha = ((rank(delay(((self.high - self.low) / (ts_sum(self.close, 5) / 5)), 2)) * rank(rank(self.volume))) / (((self.high -self.low) / (ts_sum(self.close, 5) / 5)) / (self.vwap - self.close)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#84 SignedPower(Ts_Rank((vwap - ts_max(vwap, 15.3217)), 20.7127), delta(close,4.96796))
def alpha084(self):
alpha = pow(ts_rank((self.vwap - ts_max(self.vwap, 15)), 21), delta(self.close,5))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#85 (rank(correlation(((high * 0.876703) + (close * (1 - 0.876703))), adv30,9.61331))^rank(correlation(Ts_Rank(((high + low) / 2), 3.70596), Ts_Rank(volume, 10.1595),7.11408)))
def alpha085(self):
adv30 = sma(self.volume, 30)
alpha = (rank(correlation(((self.high * 0.876703) + (self.close * (1 - 0.876703))), adv30,10)).pow(rank(correlation(ts_rank(((self.high + self.low) / 2), 4), ts_rank(self.volume, 10),7))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#86 ((Ts_Rank(correlation(close, sum(adv20, 14.7444), 6.00049), 20.4195) < rank(((open+ close) - (vwap + open)))) * -1)
def alpha086(self):
adv20 = sma(self.volume, 20)
alpha = ((ts_rank(correlation(self.close, sma(adv20, 15), 6), 20) < rank(((self.open+ self.close) - (self.vwap +self.open)))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#87 (max(rank(decay_linear(delta(((close * 0.369701) + (vwap * (1 - 0.369701))),1.91233), 2.65461)), Ts_Rank(decay_linear(abs(correlation(IndNeutralize(adv81,IndClass.industry), close, 13.4132)), 4.89768), 14.4535)) * -1)
def alpha087(self):
indaverage_adv81 = IndustryAverage_adv(81)
indaverage_adv81 = indaverage_adv81[indaverage_adv81.index.isin(self.available_dates)]
indaverage_adv81 = indaverage_adv81[self.industry]
indaverage_adv81 = indaverage_adv81.reset_index(drop=True)
adv81 = sma(self.volume, 81)
indneutralized_adv81 = adv81 - indaverage_adv81
p1 = rank(decay_linear(delta(((self.close * 0.369701) + (self.vwap * (1 - 0.369701))),2).to_frame(), 3))
p2 = ts_rank(decay_linear(abs(correlation(indneutralized_adv81, self.close, 13)), 5), 14)
p1=p1.iloc[:,0]
p2=p2.iloc[:,0]
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'max']=df['p1']
df.at[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1 * df['max']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#88 min(rank(decay_linear(((rank(open) + rank(low)) - (rank(high) + rank(close))),8.06882)), Ts_Rank(decay_linear(correlation(Ts_Rank(close, 8.44728), Ts_Rank(adv60,20.6966), 8.01266), 6.65053), 2.61957))
def alpha088(self):
adv60 = sma(self.volume, 60)
p1=rank(decay_linear(((rank(self.open) + rank(self.low)) - (rank(self.high) + rank(self.close))).to_frame(),8).CLOSE)
p2=ts_rank(decay_linear(correlation(ts_rank(self.close, 8), ts_rank(adv60,21), 8).to_frame(), 7).CLOSE, 3)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'min']=df['p2']
df.at[df['p2']>=df['p1'],'min']=df['p1']
alpha = df['min']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#89 (Ts_Rank(decay_linear(correlation(((low * 0.967285) + (low * (1 - 0.967285))), adv10,6.94279), 5.51607), 3.79744) - Ts_Rank(decay_linear(delta(IndNeutralize(vwap,IndClass.industry), 3.48158), 10.1466), 15.3012))
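# Implementation note (assumed rounding): delta(IndNeutralize(vwap), 3.48158), the
# decay_linear window of 10.1466 days and the ts_rank window of 15.3012 days are
# implemented below with the integer parameters 3, 10 and 15.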
def alpha089(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
adv10 = sma(self.volume, 10)
alpha = ts_rank(decay_linear(correlation(((self.low * 0.967285) + (self.low * (1 - 0.967285))), adv10, 7), 6), 4) - ts_rank(decay_linear(delta(indneutralized_vwap, 3), 10), 15)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#90 ((rank((close - ts_max(close, 4.66719)))^Ts_Rank(correlation(IndNeutralize(adv40,IndClass.subindustry), low, 5.38375), 3.21856)) * -1)
def alpha090(self):
indaverage_adv40 = IndustryAverage_adv(40)
indaverage_adv40 = indaverage_adv40[indaverage_adv40.index.isin(self.available_dates)]
indaverage_adv40 = indaverage_adv40[self.industry]
indaverage_adv40 = indaverage_adv40.reset_index(drop=True)
adv40 = sma(self.volume, 40)
indneutralized_adv40 = adv40 - indaverage_adv40
alpha = ((rank((self.close - ts_max(self.close, 5))) ** ts_rank(correlation(indneutralized_adv40, self.low, 5), 3)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#91 ((Ts_Rank(decay_linear(decay_linear(correlation(IndNeutralize(close,IndClass.industry), volume, 9.74928), 16.398), 3.83219), 4.8667) -rank(decay_linear(correlation(vwap, adv30, 4.01303), 2.6809))) * -1)
def alpha091(self):
indaverage_close = IndustryAverage_close()
indaverage_close = indaverage_close[indaverage_close.index.isin(self.available_dates)]
indaverage_close = indaverage_close[self.industry]
indaverage_close = indaverage_close.reset_index(drop=True)
indneutralized_close = self.close - indaverage_close
adv30 = sma(self.volume, 30)
alpha = ((ts_rank(decay_linear(decay_linear(correlation(indneutralized_close, self.volume, 10), 16), 4), 5) -rank(decay_linear(correlation(self.vwap, adv30, 4), 3))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#92 min(Ts_Rank(decay_linear(((((high + low) / 2) + close) < (low + open)), 14.7221),18.8683), Ts_Rank(decay_linear(correlation(rank(low), rank(adv30), 7.58555), 6.94024),6.80584))
def alpha092(self):
adv30 = sma(self.volume, 30)
p1=ts_rank(decay_linear(((((self.high + self.low) / 2) + self.close) < (self.low + self.open)).to_frame(), 15).CLOSE,19)
p2=ts_rank(decay_linear(correlation(rank(self.low), rank(adv30), 8).to_frame(), 7).CLOSE,7)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'min']=df['p2']
df.at[df['p2']>=df['p1'],'min']=df['p1']
alpha = df['min']
return alpha[self.start_date_index:self.end_date_index]
# Alpha#93 (Ts_Rank(decay_linear(correlation(IndNeutralize(vwap, IndClass.industry), adv81,17.4193), 19.848), 7.54455) / rank(decay_linear(delta(((close * 0.524434) + (vwap * (1 -0.524434))), 2.77377), 16.2664)))
def alpha093(self):
indaverage_vwap = IndustryAverage_vwap()
indaverage_vwap = indaverage_vwap[indaverage_vwap.index.isin(self.available_dates)]
indaverage_vwap = indaverage_vwap[self.industry]
indaverage_vwap = indaverage_vwap.reset_index(drop=True)
indneutralized_vwap = self.vwap - indaverage_vwap
adv81 = sma(self.volume, 81)
alpha = (ts_rank(decay_linear(correlation(indneutralized_vwap, adv81, 17).to_frame(), 20), 8) / rank(decay_linear(delta(((self.close * 0.524434) + (self.vwap * (1 -0.524434))), 3).to_frame(), 16)))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#94 ((rank((vwap - ts_min(vwap, 11.5783)))^Ts_Rank(correlation(Ts_Rank(vwap,19.6462), Ts_Rank(adv60, 4.02992), 18.0926), 2.70756)) * -1)
def alpha094(self):
adv60 = sma(self.volume, 60)
alpha = ((rank((self.vwap - ts_min(self.vwap, 12))).pow(ts_rank(correlation(ts_rank(self.vwap,20), ts_rank(adv60, 4), 18), 3)) * -1))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#95 (rank((open - ts_min(open, 12.4105))) < Ts_Rank((rank(correlation(sum(((high + low)/ 2), 19.1351), sum(adv40, 19.1351), 12.8742))^5), 11.7584))
def alpha095(self):
adv40 = sma(self.volume, 40)
alpha = (rank((self.open - ts_min(self.open, 12))) < ts_rank((rank(correlation(sma(((self.high + self.low)/ 2), 19), sma(adv40, 19), 13)).pow(5)), 12))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#96 (max(Ts_Rank(decay_linear(correlation(rank(vwap), rank(volume), 3.83878),4.16783), 8.38151), Ts_Rank(decay_linear(Ts_ArgMax(correlation(Ts_Rank(close, 7.45404),Ts_Rank(adv60, 4.13242), 3.65459), 12.6556), 14.0365), 13.4143)) * -1)
def alpha096(self):
adv60 = sma(self.volume, 60)
p1=ts_rank(decay_linear(correlation(rank(self.vwap), rank(self.volume), 4).to_frame(), 4).CLOSE, 8)
p2=ts_rank(decay_linear(ts_argmax(correlation(ts_rank(self.close, 7),ts_rank(adv60, 4), 4), 13).to_frame(), 14).CLOSE, 13)
df=pd.DataFrame({'p1':p1,'p2':p2})
df.at[df['p1']>=df['p2'],'max']=df['p1']
df.at[df['p2']>=df['p1'],'max']=df['p2']
alpha = -1*df['max']
#alpha = (max(ts_rank(decay_linear(correlation(rank(self.vwap), rank(self.volume).to_frame(), 4),4).CLOSE, 8), ts_rank(decay_linear(ts_argmax(correlation(ts_rank(self.close, 7),ts_rank(adv60, 4), 4), 13).to_frame(), 14).CLOSE, 13)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#97 ((rank(decay_linear(delta(IndNeutralize(((low * 0.721001) + (vwap * (1 - 0.721001))),IndClass.industry), 3.3705), 20.4523)) - Ts_Rank(decay_linear(Ts_Rank(correlation(Ts_Rank(low,7.87871), Ts_Rank(adv60, 17.255), 4.97547), 18.5925), 15.7152), 6.71659)) * -1)
def alpha097(self):
indaverage_data = IndustryAverage_PreparationForAlpha097()
indaverage_data = indaverage_data[indaverage_data.index.isin(self.available_dates)]
indaverage_data = indaverage_data[self.industry]
indaverage_data = indaverage_data.reset_index(drop=True)
unindneutralized_data = (self.low * 0.721001) + (self.vwap * (1 - 0.721001))
indneutralized_data = unindneutralized_data - indaverage_data
adv60 = sma(self.volume, 60)
alpha = ((rank(decay_linear(delta(indneutralized_data, 3).to_frame(), 20)) - ts_rank(decay_linear(ts_rank(correlation(ts_rank(self.low,8), ts_rank(adv60, 17), 5), 19).to_frame(), 16), 7)) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#98 (rank(decay_linear(correlation(vwap, sum(adv5, 26.4719), 4.58418), 7.18088)) -rank(decay_linear(Ts_Rank(Ts_ArgMin(correlation(rank(open), rank(adv15), 20.8187), 8.62571),6.95668), 8.07206)))
def alpha098(self):
adv5 = sma(self.volume, 5)
adv15 = sma(self.volume, 15)
alpha = (rank(decay_linear(correlation(self.vwap, sma(adv5, 26), 5).to_frame(), 7).CLOSE) -rank(decay_linear(ts_rank(ts_argmin(correlation(rank(self.open), rank(adv15), 21), 9),7).to_frame(), 8).CLOSE))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#99 ((rank(correlation(sum(((high + low) / 2), 19.8975), sum(adv60, 19.8975), 8.8136)) <rank(correlation(low, volume, 6.28259))) * -1)
def alpha099(self):
adv60 = sma(self.volume, 60)
alpha = ((rank(correlation(ts_sum(((self.high + self.low) / 2), 20), ts_sum(adv60, 20), 9)) <rank(correlation(self.low, self.volume, 6))) * -1)
return alpha[self.start_date_index:self.end_date_index]
# Alpha#100 (0 - (1 * (((1.5 * scale(indneutralize(indneutralize(rank(((((close - low) - (high -close)) / (high - low)) * volume)), IndClass.subindustry), IndClass.subindustry))) -scale(indneutralize((correlation(close, rank(adv20), 5) - rank(ts_argmin(close, 30))),IndClass.subindustry))) * (volume / adv20))))
def alpha100(self):
indaverage_data_1 = IndustryAverage_PreparationForAlpha100_1()
indaverage_data_1 = indaverage_data_1[indaverage_data_1.index.isin(self.available_dates)]
indaverage_data_1 = indaverage_data_1[self.industry]
indaverage_data_1 = indaverage_data_1.reset_index(drop=True)
unindneutralized_data_1 = rank(((((self.close - self.low) - (self.high - self.close)) / (self.high - self.low)) * self.volume))
indneutralized_data_1 = unindneutralized_data_1 - indaverage_data_1 #there's a problem in calculation here.
indaverage_data_2 = IndustryAverage_PreparationForAlpha100_2()
indaverage_data_2 = indaverage_data_2[indaverage_data_2.index.isin(self.available_dates)]
indaverage_data_2 = indaverage_data_2[self.industry]
indaverage_data_2 = indaverage_data_2.reset_index(drop=True)
adv20 = sma(self.volume, 20)
unindneutralized_data_2 = (correlation(self.close, rank(adv20), 5) - rank(ts_argmin(self.close, 30)))
indneutralized_data_2 = unindneutralized_data_2 - indaverage_data_2
alpha = (0 - (1 * (((1.5 * scale(indneutralized_data_1))-scale(indneutralized_data_2)) * (self.volume / adv20))))
return alpha[self.start_date_index:self.end_date_index]
# Alpha#101 ((close - open) / ((high - low) + .001))
def alpha101(self):
alpha = (self.close - self.open) /((self.high - self.low) + 0.001)
return alpha[self.start_date_index:self.end_date_index]
class GTJAalphas(object):
def __init__(self, ts_code="000001.SZ",start_date=20210101,end_date=20211231):
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_indicators_daily_chosen=local_source.get_stock_indicators_daily(cols='TRADE_DATE,TS_CODE,TOTAL_SHARE',condition="TS_CODE = " + "'" + ts_code + "'").sort_values(by="TRADE_DATE", ascending=True)
stock_data_chosen=pd.merge(quotations_daily_chosen,stock_indicators_daily_chosen,on=['TRADE_DATE','TS_CODE'],how="left")
stock_data_chosen["TOTAL_MV"]=stock_data_chosen["TOTAL_SHARE"]*stock_data_chosen["CLOSE"]
stock_data_chosen=stock_data_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
stock_data_chosen["TRADE_DATE"]=stock_data_chosen["TRADE_DATE"].astype(int)
self.open = stock_data_chosen['OPEN']
self.high = stock_data_chosen['HIGH']
self.low = stock_data_chosen['LOW']
self.close = stock_data_chosen['CLOSE']
self.volume = stock_data_chosen['VOL']*100
self.amount = stock_data_chosen['AMOUNT']*1000
self.returns = stock_data_chosen['CHANGE'] / stock_data_chosen['OPEN']
self.vwap = (stock_data_chosen['AMOUNT']*1000)/(stock_data_chosen['VOL']*100+1)
self.cap = stock_data_chosen['TOTAL_MV']
self.industry = local_source.get_stock_list(cols='TS_CODE,INDUSTRY', condition='TS_CODE = '+'"'+ts_code+'"')['INDUSTRY'].iloc[0]
self.available_dates = stock_data_chosen["TRADE_DATE"]
if ts_code[-2:]=='SZ': index_code = "399001.SZ"
else: index_code = "000001.SH"
indices_daily_chosen=local_source.get_indices_daily(cols='TRADE_DATE,INDEX_CODE,OPEN,CLOSE',condition='INDEX_CODE = '+'"'+index_code+'"').sort_values(by="TRADE_DATE", ascending=True)
indices_daily_chosen=indices_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
indices_daily_chosen = pd.merge(stock_data_chosen["TRADE_DATE"], indices_daily_chosen, on=['TRADE_DATE'], how="left")
self.benchmarkindexopen = indices_daily_chosen['OPEN']
self.benchmarkindexclose = indices_daily_chosen['CLOSE']
output_dates = stock_data_chosen[(stock_data_chosen["TRADE_DATE"]>=start_date)*(stock_data_chosen["TRADE_DATE"]<=end_date)]["TRADE_DATE"]
start_available_date = output_dates.iloc[0]
end_available_date = output_dates.iloc[-1]
self.start_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == start_available_date].index[0]
self.end_date_index = stock_data_chosen["TRADE_DATE"][stock_data_chosen["TRADE_DATE"].values == end_available_date].index[0] +1
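# Usage sketch (hypothetical; assumes local_source is configured and, for GTJAalpha030,
# that FFfactors_daily.csv is available):
# factors = GTJAalphas(ts_code="000001.SZ", start_date=20210101, end_date=20211231)
# a1 = factors.GTJAalpha001() # pandas Series restricted to the requested date range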
#Alpha1 (-1 * CORR(RANK(DELTA(LOG(VOLUME), 1)), RANK(((CLOSE - OPEN) / OPEN)), 6))
def GTJAalpha001(self):
alpha = -1 * correlation(rank(delta(np.log(self.volume),1)),rank(((self.close-self.open)/self.open)), 6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha2 (-1 * DELTA((((CLOSE - LOW) - (HIGH - CLOSE)) / (HIGH - LOW)), 1))
def GTJAalpha002(self):
alpha = -1 * delta((((self.close - self.low) - (self.high - self.close)) / (self.high - self.low)), 1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha3 SUM((CLOSE=DELAY(CLOSE,1)?0:CLOSE-(CLOSE>DELAY(CLOSE,1)?MIN(LOW,DELAY(CLOSE,1)):MAX(HIGH,DELAY(CLOSE,1)))),6)
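# Implementation note: the nested ternaries in the GTJA formulas are vectorized with
# np.where, evaluating the innermost branch first (inner1) and passing it as the
# else-branch of the enclosing condition (inner2).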
def GTJAalpha003(self):
delay1 = self.close.shift()
condition1 = (self.close > delay1)
inner1_true = np.minimum(self.low, delay1)
inner1_false = np.maximum(self.high, delay1)
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close == delay1)
inner2_true = pd.Series(np.zeros(len(condition2)))
inner2_false = self.close - inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(inner2, 6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha4 ((((SUM(CLOSE, 8) / 8) + STD(CLOSE, 8)) < (SUM(CLOSE, 2) / 2)) ? (-1 * 1) : (((SUM(CLOSE, 2) / 2) < ((SUM(CLOSE, 8) / 8) - STD(CLOSE, 8))) ? 1 : (((1 < (VOLUME / MEAN(VOLUME,20))) || ((VOLUME / MEAN(VOLUME,20)) == 1)) ? 1 : (-1 * 1))))
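# Implementation note: evaluated innermost-out below; the VOLUME/MEAN(VOLUME,20) test
# yields +1/-1, the lower-band test then overrides with +1, and the upper-band test
# finally overrides with -1, matching the nesting of the formula above.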
def GTJAalpha004(self):
condition1 = ((1 < (self.volume / sma(self.volume,20))) | ((self.volume / sma(self.volume,20)) == 1))
inner1_true = pd.Series(np.ones(len(condition1)))
inner1_false = -1 * pd.Series(np.ones(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = ((ts_sum(self.close, 2) / 2) < ((ts_sum(self.close, 8) / 8) - stddev(self.close, 8)))
inner2 = pd.Series(np.where(condition2, 1, inner1))
condition3 = (((ts_sum(self.close, 8) / 8) + stddev(self.close, 8)) < (ts_sum(self.close, 2) / 2))
alpha = pd.Series(np.where(condition3, -1, inner2))
return alpha[self.start_date_index:self.end_date_index]
#Alpha5 (-1 * TSMAX(CORR(TSRANK(VOLUME, 5), TSRANK(HIGH, 5), 5), 3))
def GTJAalpha005(self):
alpha = -1 * ts_max(correlation(ts_rank(self.volume,5), ts_rank(self.high,5), 5) ,3)
return alpha[self.start_date_index:self.end_date_index]
#Alpha6 (RANK(SIGN(DELTA((((OPEN * 0.85) + (HIGH * 0.15))), 4)))* -1)
def GTJAalpha006(self):
alpha = rolling_rank(sign(delta((((self.open * 0.85) + (self.high * 0.15))), 4)))* -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha7 ((RANK(MAX((VWAP - CLOSE), 3)) + RANK(MIN((VWAP - CLOSE), 3))) * RANK(DELTA(VOLUME, 3)))
def GTJAalpha007(self):
alpha = (rolling_rank(np.maximum((self.vwap - self.close), 3)) + rolling_rank(np.minimum((self.vwap - self.close), 3))) * rolling_rank(delta(self.volume, 3))
return alpha[self.start_date_index:self.end_date_index]
#Alpha8 RANK(DELTA(((((HIGH + LOW) / 2) * 0.2) + (VWAP * 0.8)), 4) * -1)
def GTJAalpha008(self):
alpha = rolling_rank(delta(((((self.high + self.low) / 2) * 0.2) + (self.vwap * 0.8)), 4) * -1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha9 SMA(((HIGH+LOW)/2-(DELAY(HIGH,1)+DELAY(LOW,1))/2)*(HIGH-LOW)/VOLUME,7,2)
def GTJAalpha009(self):
alpha = ema(((self.high+self.low)/2-(delay(self.high,1)+delay(self.low,1))/2)*(self.high-self.low)/self.volume,7,2)
return alpha[self.start_date_index:self.end_date_index]
#Alpha10 (RANK(MAX(((RET < 0) ? STD(RET, 20) : CLOSE)^2),5))
def GTJAalpha010(self):
condition1 = (self.returns < 0)
inner1_true = stddev(self.returns, 20)
inner1_false = self.close
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
alpha = rolling_rank(np.maximum(inner1**2, 5))
return alpha[self.start_date_index:self.end_date_index]
#Alpha11 SUM(((CLOSE-LOW)-(HIGH-CLOSE))./(HIGH-LOW).*VOLUME,6)
def GTJAalpha011(self):
alpha = ts_sum(((self.close-self.low)-(self.high-self.close))/(self.high-self.low)*self.volume,6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha12 (RANK((OPEN - (SUM(VWAP, 10) / 10)))) * (-1 * (RANK(ABS((CLOSE - VWAP)))))
def GTJAalpha012(self):
alpha = rolling_rank((self.open - (ts_sum(self.vwap, 10) / 10))) * -1 * (rolling_rank(abs((self.close - self.vwap))))
return alpha[self.start_date_index:self.end_date_index]
#Alpha13 (((HIGH * LOW)^0.5) - VWAP)
def GTJAalpha013(self):
alpha = ((self.high * self.low)**0.5) - self.vwap
return alpha[self.start_date_index:self.end_date_index]
#Alpha14 CLOSE-DELAY(CLOSE,5)
def GTJAalpha014(self):
alpha = self.close - delay(self.close,5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha15 OPEN/DELAY(CLOSE,1)-1
def GTJAalpha015(self):
alpha = (self.open/delay(self.close, 1)) -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha16 (-1 * TSMAX(RANK(CORR(RANK(VOLUME), RANK(VWAP), 5)), 5))
def GTJAalpha016(self):
alpha = -1 * ts_max(rolling_rank(correlation(rolling_rank(self.volume), rolling_rank(self.vwap), 5)), 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha17 RANK((VWAP - MAX(VWAP, 15)))^DELTA(CLOSE, 5)
def GTJAalpha017(self):
alpha = rolling_rank((self.vwap - np.maximum(self.vwap, 15)))**delta(self.close, 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha18 CLOSE/DELAY(CLOSE,5)
def GTJAalpha018(self):
alpha = self.close / delay(self.close, 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha19 (CLOSE<DELAY(CLOSE,5)?(CLOSE-DELAY(CLOSE,5))/DELAY(CLOSE,5):(CLOSE=DELAY(CLOSE,5)?0:(CLOSE-DELAY(CLOSE,5))/CLOSE))
def GTJAalpha019(self):
condition1 = (self.close == delay(self.close,5))
inner1_true=pd.Series(np.zeros(len(condition1)))
inner1_false=(self.close-delay(self.close,5))/self.close
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close<delay(self.close,5))
inner2_true = (self.close-delay(self.close,5))/delay(self.close,5)
inner2_false = inner1
alpha = pd.Series(np.where(condition2, inner2_true, inner2_false))
return alpha[self.start_date_index:self.end_date_index]
#Alpha20 (CLOSE-DELAY(CLOSE,6))/DELAY(CLOSE,6)*100
def GTJAalpha020(self):
alpha = (self.close-delay(self.close,6)) / delay(self.close,6) *100
return alpha[self.start_date_index:self.end_date_index]
#Alpha21 REGBETA(MEAN(CLOSE,6),SEQUENCE(6))
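# Interpretation used here: for each day, regress the trailing six values of MEAN(CLOSE,6)
# on the sequence 1..6 with scipy's linregress and take the fitted slope as the factor.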
def GTJAalpha021(self): #I'm not sure if I've understood the formula correctly.
y = sma(self.close, 6)
alpha = pd.Series(np.nan, index=self.close.index)
for i in range(6-1,len(y)):
alpha.iloc[i]=sp.stats.linregress(pd.Series(np.arange(1,7)), y[i-6+1:i+1])[0]
return alpha[self.start_date_index:self.end_date_index]
#Alpha22 SMEAN(((CLOSE-MEAN(CLOSE,6))/MEAN(CLOSE,6)-DELAY((CLOSE-MEAN(CLOSE,6))/MEAN(CLOSE,6),3)),12,1)
def GTJAalpha022(self):
alpha = ema(((self.close-sma(self.close,6))/sma(self.close,6)-delay((self.close-sma(self.close,6))/sma(self.close,6),3)),12,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha23 SMA((CLOSE>DELAY(CLOSE,1)?STD(CLOSE,20):0),20,1) / (SMA((CLOSE>DELAY(CLOSE,1)?STD(CLOSE,20):0),20,1)+SMA((CLOSE<=DELAY(CLOSE,1)?STD(CLOSE,20):0),20,1))*100
def GTJAalpha023(self):
condition1 = (self.close > delay(self.close,1))
inner1_true= stddev(self.close, 20)
inner1_false = pd.Series(np.zeros(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close <= delay(self.close,1))
inner2_true= stddev(self.close, 20)
inner2_false = pd.Series(np.zeros(len(condition2)))
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ema(inner1,20,1) / (ema(inner1,20,1)+ema(inner2,20,1))*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha24 SMA(CLOSE-DELAY(CLOSE,5),5,1)
def GTJAalpha024(self):
alpha = ema(self.close-delay(self.close,5),5,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha25 ((-1 * RANK((DELTA(CLOSE, 7) * (1 - RANK(DECAYLINEAR((VOLUME / MEAN(VOLUME,20)), 9)))))) * (1 + RANK(SUM(RET, 250))))
def GTJAalpha025(self):
alpha = ((-1 * rolling_rank((delta(self.close, 7) * (1 - rolling_rank(decay_linear((self.volume / sma(self.volume,20)), 9)))))) * (1 + rolling_rank(ts_sum(self.returns, 250))))
return alpha[self.start_date_index:self.end_date_index]
#Alpha26 ((((SUM(CLOSE, 7) / 7) - CLOSE)) + ((CORR(VWAP, DELAY(CLOSE, 5), 230))))
def GTJAalpha026(self):
alpha = (((ts_sum(self.close, 7) / 7) - self.close)) + ((correlation(self.vwap, delay(self.close, 5), 230)))
return alpha[self.start_date_index:self.end_date_index]
#Alpha27 WMA((CLOSE-DELAY(CLOSE,3))/DELAY(CLOSE,3)*100+(CLOSE-DELAY(CLOSE,6))/DELAY(CLOSE,6)*100,12)
def GTJAalpha027(self):
alpha = wma(( self.close-delay(self.close,3))/delay(self.close,3)*100 + (self.close-delay(self.close,6))/delay(self.close,6)*100 ,12)
return alpha[self.start_date_index:self.end_date_index]
#Alpha28 3*SMA((CLOSE-TSMIN(LOW,9))/(TSMAX(HIGH,9)-TSMIN(LOW,9))*100,3,1)-2*SMA(SMA((CLOSE-TSMIN(LOW,9))/(TSMAX(HIGH,9)-TSMIN(LOW,9))*100,3,1),3,1)
def GTJAalpha028(self):
alpha = 3*ema((self.close-ts_min(self.low,9))/(ts_max(self.high,9)-ts_min(self.low,9))*100,3,1)-2*ema(ema((self.close-ts_min(self.low,9))/(ts_max(self.high,9)-ts_min(self.low,9))*100,3,1),3,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha29 (CLOSE-DELAY(CLOSE,6))/DELAY(CLOSE,6)*VOLUME
def GTJAalpha029(self):
alpha = (self.close-delay(self.close,6))/delay(self.close,6)*self.volume
return alpha[self.start_date_index:self.end_date_index]
#Alpha30 WMA((REGRESI(CLOSE/DELAY(CLOSE)-1,MKT,SMB,HML,60))^2,20)
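# Interpretation used here: REGRESI is read as a rolling 60-day OLS of daily returns on
# the benchmark-index return (MKT) plus the SMB and HML factors loaded from
# FFfactors_daily.csv; the fitted intercept is squared and smoothed with WMA(., 20).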
def GTJAalpha030(self):
y = (self.close/delay(self.close)) -1
y.rename("y",inplace=True)
y = pd.concat([self.available_dates, y],axis=1)
MKT = self.benchmarkindexclose.pct_change()
MKT.rename("MKT", inplace=True)
MKT = pd.concat([self.available_dates,MKT],axis=1)
FFfactor_data=pd.read_csv("FFfactors_daily.csv")
FFfactor_data_needed = FFfactor_data[["TRADE_DATE","SMB","HML"]]
dt = pd.merge(y, MKT, on=['TRADE_DATE'], how="left")
dt = pd.merge(dt, FFfactor_data_needed, on=['TRADE_DATE'], how="left")
dt["const"]=1
result = pd.Series(np.nan, index=dt.index)
for i in range(60-1, len(dt)):
dt_piece = dt[i-60+1:i+1].dropna()
y_piece = dt_piece["y"]
x_piece = dt_piece[["MKT","SMB","HML","const"]]
if len(y_piece) != 0:
model = sm.OLS(y_piece, x_piece)
result.iloc[i] = model.fit().params.loc["const"]
alpha = wma((result)**2,20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha31 (CLOSE-MEAN(CLOSE,12))/MEAN(CLOSE,12)*100
def GTJAalpha031(self):
alpha = (self.close-sma(self.close,12))/sma(self.close,12)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha32 (-1 * SUM(RANK(CORR(RANK(HIGH), RANK(VOLUME), 3)), 3))
def GTJAalpha032(self):
alpha = -1 * ts_sum(rolling_rank(correlation(rolling_rank(self.high), rolling_rank(self.volume), 3)), 3)
return alpha[self.start_date_index:self.end_date_index]
#Alpha33 ((((-1 * TSMIN(LOW, 5)) + DELAY(TSMIN(LOW, 5), 5)) * RANK(((SUM(RET, 240) - SUM(RET, 20)) / 220))) * TSRANK(VOLUME, 5))
def GTJAalpha033(self):
alpha = (((-1 * ts_min(self.low, 5)) + delay(ts_min(self.low, 5), 5)) * rolling_rank(((ts_sum(self.returns, 240) - ts_sum(self.returns, 20)) / 220))) * ts_rank(self.volume, 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha34 MEAN(CLOSE,12)/CLOSE
def GTJAalpha034(self):
alpha = sma(self.close,12) / self.close
return alpha[self.start_date_index:self.end_date_index]
#Alpha35 (MIN(RANK(DECAYLINEAR(DELTA(OPEN, 1), 15)), RANK(DECAYLINEAR(CORR((VOLUME), ((OPEN * 0.65) + (OPEN *0.35)), 17),7))) * -1)
def GTJAalpha035(self):
alpha = np.minimum(rolling_rank(decay_linear(delta(self.open, 1), 15)), rolling_rank(decay_linear(correlation((self.volume), ((self.open * 0.65) + (self.open *0.35)), 17),7))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha36 RANK(SUM(CORR(RANK(VOLUME), RANK(VWAP)), 6), 2)
def GTJAalpha036(self):
alpha = rolling_rank(ts_sum(correlation(rolling_rank(self.volume), rolling_rank(self.vwap)), 6), 2)
return alpha[self.start_date_index:self.end_date_index]
#Alpha37 (-1 * RANK(((SUM(OPEN, 5) * SUM(RET, 5)) - DELAY((SUM(OPEN, 5) * SUM(RET, 5)), 10))))
def GTJAalpha037(self):
alpha = -1 * rolling_rank(((ts_sum(self.open, 5) * ts_sum(self.returns, 5)) - delay((ts_sum(self.open, 5) * ts_sum(self.returns, 5)), 10)))
return alpha[self.start_date_index:self.end_date_index]
#Alpha38 (((SUM(HIGH, 20) / 20) < HIGH) ? (-1 * DELTA(HIGH, 2)) : 0)
def GTJAalpha038(self):
condition1 = ((ts_sum(self.high, 20) / 20) < self.high)
inner1_true= -1 * delta(self.high, 2)
inner1_false = pd.Series(np.zeros(len(condition1)))
alpha = pd.Series(np.where(condition1, inner1_true, inner1_false))
return alpha[self.start_date_index:self.end_date_index]
#Alpha39 ((RANK(DECAYLINEAR(DELTA((CLOSE), 2),8)) - RANK(DECAYLINEAR(CORR(((VWAP * 0.3) + (OPEN * 0.7)), SUM(MEAN(VOLUME,180), 37), 14), 12))) * -1)
def GTJAalpha039(self):
alpha = ((rolling_rank(decay_linear(delta((self.close), 2),8)) - rolling_rank(decay_linear(correlation(((self.vwap * 0.3) + (self.open * 0.7)), ts_sum(sma(self.volume,180), 37), 14), 12))) * -1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha40 SUM((CLOSE>DELAY(CLOSE,1)?VOLUME:0),26)/SUM((CLOSE<=DELAY(CLOSE,1)?VOLUME:0),26)*100
def GTJAalpha040(self):
condition1 = (self.close > delay(self.close,1))
inner1_true= self.volume
inner1_false = pd.Series(np.zeros(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close <= delay(self.close,1))
inner2_true= self.volume
inner2_false = pd.Series(np.zeros(len(condition2)))
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(inner1,26) / ts_sum(inner2,26)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha41 (RANK(MAX(DELTA((VWAP), 3), 5))* -1)
def GTJAalpha041(self):
alpha = rolling_rank(np.maximum(delta((self.vwap), 3), 5))* -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha42 ((-1 * RANK(STD(HIGH, 10))) * CORR(HIGH, VOLUME, 10))
def GTJAalpha042(self):
alpha = (-1 * rolling_rank(stddev(self.high, 10))) * correlation(self.high, self.volume, 10)
return alpha[self.start_date_index:self.end_date_index]
#Alpha43 SUM((CLOSE>DELAY(CLOSE,1)?VOLUME:(CLOSE<DELAY(CLOSE,1)?-VOLUME:0)),6)
def GTJAalpha043(self):
condition1 = (self.close < delay(self.close,1))
inner1_true = -1* self.volume
inner1_false = pd.Series(np.zeros(len(condition1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close > delay(self.close,1))
inner2_true = self.volume
inner2_false = inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(inner2,6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha44 (TSRANK(DECAYLINEAR(CORR(((LOW )), MEAN(VOLUME,10), 7), 6),4) + TSRANK(DECAYLINEAR(DELTA((VWAP), 3), 10), 15))
def GTJAalpha044(self):
alpha = ts_rank(decay_linear(correlation(self.low, sma(self.volume,10), 7), 6),4) + ts_rank(decay_linear(delta((self.vwap), 3), 10), 15)
return alpha[self.start_date_index:self.end_date_index]
#Alpha45 (RANK(DELTA((((CLOSE * 0.6) + (OPEN *0.4))), 1)) * RANK(CORR(VWAP, MEAN(VOLUME,150), 15)))
def GTJAalpha045(self):
alpha = rolling_rank(delta((((self.close * 0.6) + (self.open *0.4))), 1)) * rolling_rank(correlation(self.vwap, sma(self.volume,150), 15))
return alpha[self.start_date_index:self.end_date_index]
#Alpha46 (MEAN(CLOSE,3)+MEAN(CLOSE,6)+MEAN(CLOSE,12)+MEAN(CLOSE,24))/(4*CLOSE)
def GTJAalpha046(self):
alpha = (sma(self.close,3)+sma(self.close,6)+sma(self.close,12)+sma(self.close,24))/(4*self.close)
return alpha[self.start_date_index:self.end_date_index]
#Alpha47 SMA((TSMAX(HIGH,6)-CLOSE)/(TSMAX(HIGH,6)-TSMIN(LOW,6))*100,9,1)
def GTJAalpha047(self):
alpha = ema((ts_max(self.high,6)-self.close)/(ts_max(self.high,6)-ts_min(self.low,6))*100,9,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha48 (-1*((RANK(((SIGN((CLOSE - DELAY(CLOSE, 1))) + SIGN((DELAY(CLOSE, 1) - DELAY(CLOSE, 2)))) + SIGN((DELAY(CLOSE, 2) - DELAY(CLOSE, 3)))))) * SUM(VOLUME, 5)) / SUM(VOLUME, 20))
def GTJAalpha048(self):
alpha = -1*((rolling_rank(((sign((self.close - delay(self.close, 1))) + sign((delay(self.close, 1) - delay(self.close, 2)))) + sign((delay(self.close, 2) - delay(self.close, 3)))))) * ts_sum(self.volume, 5)) / ts_sum(self.volume, 20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha49 SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)/(SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12))
def GTJAalpha049(self):
condition1 = ((self.high+self.low)>=(delay(self.high,1)+delay(self.low,1)))
inner1_true = pd.Series(np.zeros(len(condition1)))
inner1_false = np.maximum(abs(self.high-delay(self.high,1)),abs(self.low-delay(self.low,1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = ((self.high+self.low)<=(delay(self.high,1)+delay(self.low,1)))
inner2 = pd.Series(np.where(condition2, inner1_true, inner1_false))
alpha = ts_sum(inner1,12) / (ts_sum(inner1,12)+ts_sum(inner2,12))
return alpha[self.start_date_index:self.end_date_index]
#Alpha50 SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)/(SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12))-SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)/(SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12))
def GTJAalpha050(self):
condition1 = ((self.high+self.low) >= (delay(self.high,1)+delay(self.low,1)))
inner1_true = pd.Series(np.zeros(len(condition1)))
inner1_false = np.maximum(abs(self.high-delay(self.high,1)),abs(self.low-delay(self.low,1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = ((self.high+self.low) <= (delay(self.high,1)+delay(self.low,1)))
inner2 = pd.Series(np.where(condition2, inner1_true, inner1_false))
alpha = (ts_sum(inner2,12)-ts_sum(inner1,12))/(ts_sum(inner2,12)+ts_sum(inner1,12))
return alpha[self.start_date_index:self.end_date_index]
#Alpha51 SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)/(SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12))
def GTJAalpha051(self):
condition1 = ((self.high+self.low) >= (delay(self.high,1)+delay(self.low,1)))
inner1_true = pd.Series(np.zeros(len(condition1)))
inner1_false = np.maximum(abs(self.high-delay(self.high,1)),abs(self.low-delay(self.low,1)))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = ((self.high+self.low) <= (delay(self.high,1)+delay(self.low,1)))
inner2 = pd.Series(np.where(condition2, inner1_true, inner1_false))
alpha = ts_sum(inner2,12) / (ts_sum(inner1,12)+ts_sum(inner2,12))
return alpha[self.start_date_index:self.end_date_index]
#Alpha52 SUM(MAX(0,HIGH-DELAY((HIGH+LOW+CLOSE)/3,1)),26)/SUM(MAX(0,DELAY((HIGH+LOW+CLOSE)/3,1)-LOW),26)* 100
def GTJAalpha052(self):
alpha = ts_sum(np.maximum(0,self.high-delay((self.high+self.low+self.close)/3,1)),26)/ts_sum(np.maximum(0,delay((self.high+self.low+self.close)/3,1)-self.low),26)* 100
return alpha[self.start_date_index:self.end_date_index]
#Alpha53 COUNT(CLOSE>DELAY(CLOSE,1),12)/12*100
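# Implementation note: COUNT(cond, 12) is computed as a rolling 12-day count of up-closes;
# an equivalent vectorized form would be condition.astype(int).rolling(12).sum().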
def GTJAalpha053(self):
condition = (self.close>delay(self.close,1))
count = pd.Series(np.nan, index=self.close.index)
for i in range(12-1,len(condition)):
count.iloc[i]=condition[i-12+1:i+1].sum()
alpha = count / 12 * 100
return alpha[self.start_date_index:self.end_date_index]
#Alpha54 (-1 * RANK((STD(ABS(CLOSE - OPEN)) + (CLOSE - OPEN)) + CORR(CLOSE, OPEN,10)))
def GTJAalpha054(self):
alpha = -1 * rolling_rank((stddev(abs(self.close - self.open)) + (self.close - self.open)) + correlation(self.close,self.open,10))
return alpha[self.start_date_index:self.end_date_index]
#Alpha55 SUM(16*(CLOSE-DELAY(CLOSE,1)+(CLOSE-OPEN)/2+DELAY(CLOSE,1)-DELAY(OPEN,1))/((ABS(HIGH-DELAY(CLOSE,1))>ABS(LOW-DELAY(CLOSE,1)) & ABS(HIGH-DELAY(CLOSE,1))>ABS(HIGH-DELAY(LOW,1))?ABS(HIGH-DELAY(CLOSE,1))+ABS(LOW-DELAY(CLOSE,1))/2+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4:(ABS(LOW-DELAY(CLOSE,1))>ABS(HIGH-DELAY(LOW,1)) & ABS(LOW-DELAY(CLOSE,1))>ABS(HIGH-DELAY(CLOSE,1))?ABS(LOW-DELAY(CLOSE,1))+ABS(HIGH-DELAY(CLOSE,1))/2+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4:ABS(HIGH-DELAY(LOW,1))+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4)))*MAX(ABS(HIGH-DELAY(CLOSE,1)),ABS(LOW-DELAY(CLOSE,1))),20)
def GTJAalpha055(self):
condition1 = (abs(self.low-delay(self.close,1))>abs(self.high-delay(self.low,1))) & (abs(self.low-delay(self.close,1))>abs(self.high-delay(self.close,1)))
inner1_true = abs(self.low-delay(self.close,1)) + abs(self.high-delay(self.close,1))/2 + abs(delay(self.close,1)-delay(self.open,1))/4
inner1_false = abs(self.high-delay(self.low,1)) + abs(delay(self.close,1)-delay(self.open,1))/4
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (abs(self.high-delay(self.close,1))>abs(self.low-delay(self.close,1))) & (abs(self.high-delay(self.close,1))>abs(self.high-delay(self.low,1)))
inner2_true = abs(self.high-delay(self.close,1))+abs(self.low-delay(self.close,1))/2+abs(delay(self.close,1)-delay(self.open,1))/4
inner2_false = inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(16*(self.close-delay(self.close,1)+(self.close-self.open)/2+delay(self.close,1)-delay(self.open,1))/(inner2)*np.maximum(abs(self.high-delay(self.close,1)),abs(self.low-delay(self.close,1))),20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha56 (RANK((OPEN - TSMIN(OPEN, 12))) < RANK((RANK(CORR(SUM(((HIGH + LOW) / 2), 19), SUM(MEAN(VOLUME,40), 19), 13))^5)))
def GTJAalpha056(self):
alpha = rolling_rank((self.open - ts_min(self.open, 12))) < rolling_rank((rolling_rank(correlation(ts_sum(((self.high + self.low) / 2), 19), ts_sum(sma(self.volume,40), 19), 13))**5))
return alpha[self.start_date_index:self.end_date_index]
#Alpha57 SMA((CLOSE-TSMIN(LOW,9))/(TSMAX(HIGH,9)-TSMIN(LOW,9))*100,3,1)
def GTJAalpha057(self):
alpha = ema((self.close-ts_min(self.low,9))/(ts_max(self.high,9)-ts_min(self.low,9))*100,3,1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha58 COUNT(CLOSE>DELAY(CLOSE,1),20)/20*10
def GTJAalpha058(self):
condition = (self.close>delay(self.close,1))
count = pd.Series(np.nan, index=self.close.index)
for i in range(20-1,len(condition)):
count.iloc[i] = condition.iloc[i-20+1:i+1].sum()
alpha = count / 20 * 10
return alpha[self.start_date_index:self.end_date_index]
#Alpha59 SUM((CLOSE=DELAY(CLOSE,1)?0:CLOSE-(CLOSE>DELAY(CLOSE,1)?MIN(LOW,DELAY(CLOSE,1)):MAX(HIGH,DELAY(CLOSE,1)))),20)
def GTJAalpha059(self):
condition1 = self.close > delay(self.close,1)
inner1_true = np.minimum(self.low,delay(self.close,1))
inner1_false = np.maximum(self.high,delay(self.close,1))
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (self.close == delay(self.close,1))
inner2_true = pd.Series(np.zeros(len(condition2)))
inner2_false = self.close-inner1
inner2 = pd.Series(np.where(condition2, inner2_true, inner2_false))
alpha = ts_sum(inner2, 20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha60 SUM(((CLOSE-LOW)-(HIGH-CLOSE))./(HIGH-LOW).*VOLUME,20)
def GTJAalpha060(self):
alpha = ts_sum(((self.close-self.low)-(self.high-self.close))/(self.high-self.low)*self.volume,20)
return alpha[self.start_date_index:self.end_date_index]
#Alpha61 (MAX(RANK(DECAYLINEAR(DELTA(VWAP, 1), 12)), RANK(DECAYLINEAR(RANK(CORR((LOW),MEAN(VOLUME,80), 8)), 17))) * -1)
def GTJAalpha061(self):
alpha = np.maximum(rolling_rank(decay_linear(delta(self.vwap, 1), 12)), rolling_rank(decay_linear(rolling_rank(correlation(self.low,sma(self.volume,80), 8)), 17))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha62 (-1 * CORR(HIGH, RANK(VOLUME), 5))
def GTJAalpha062(self):
alpha = -1 * correlation(self.high, rolling_rank(self.volume), 5)
return alpha[self.start_date_index:self.end_date_index]
#Alpha63 SMA(MAX(CLOSE-DELAY(CLOSE,1),0),6,1)/SMA(ABS(CLOSE-DELAY(CLOSE,1)),6,1)*100
def GTJAalpha063(self):
alpha = ema(np.maximum(self.close-delay(self.close,1),0),6,1) / ema(abs(self.close-delay(self.close,1)),6,1)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha64 (MAX(RANK(DECAYLINEAR(CORR(RANK(VWAP), RANK(VOLUME), 4), 4)), RANK(DECAYLINEAR(MAX(CORR(RANK(CLOSE), RANK(MEAN(VOLUME,60)), 4), 13), 14))) * -1)
def GTJAalpha064(self):
alpha = np.maximum(rolling_rank(decay_linear(correlation(rolling_rank(self.vwap), rolling_rank(self.volume), 4), 4)), rolling_rank(decay_linear(np.maximum(correlation(rolling_rank(self.close), rolling_rank(sma(self.volume,60)), 4), 13), 14))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha65 MEAN(CLOSE,6)/CLOSE
def GTJAalpha065(self):
alpha = sma(self.close,6)/self.close
return alpha[self.start_date_index:self.end_date_index]
#Alpha66 (CLOSE-MEAN(CLOSE,6))/MEAN(CLOSE,6)*100
def GTJAalpha066(self):
alpha = (self.close-sma(self.close,6)) / sma(self.close,6) *100
return alpha[self.start_date_index:self.end_date_index]
#Alpha67 SMA(MAX(CLOSE-DELAY(CLOSE,1),0),24,1)/SMA(ABS(CLOSE-DELAY(CLOSE,1)),24,1)*100
def GTJAalpha067(self):
alpha = ema(np.maximum(self.close-delay(self.close,1),0),24,1) / ema(abs(self.close-delay(self.close,1)),24,1) *100
return alpha[self.start_date_index:self.end_date_index]
#Alpha68 SMA(((HIGH+LOW)/2-(DELAY(HIGH,1)+DELAY(LOW,1))/2)*(HIGH-LOW)/VOLUME,15,2)
def GTJAalpha068(self):
alpha = ema(((self.high+self.low)/2-(delay(self.high,1)+delay(self.low,1))/2)*(self.high-self.low)/self.volume, 15, 2)
return alpha[self.start_date_index:self.end_date_index]
#Alpha69 (SUM(DTM,20)>SUM(DBM,20)?(SUM(DTM,20)-SUM(DBM,20))/SUM(DTM,20):(SUM(DTM,20)=SUM(DBM,20)?0:(SUM(DTM,20)-SUM(DBM,20))/SUM(DBM,20)))
def GTJAalpha069(self):
condition_dtm = (self.open<=delay(self.open,1))
dtm_true = pd.Series(np.zeros(len(condition_dtm)))
dtm_false = np.maximum(self.high-self.open, self.open-delay(self.open,1))
dtm = pd.Series(np.where(condition_dtm, dtm_true, dtm_false))
condition_dbm = (self.open>=delay(self.open,1))
dbm_true = pd.Series(np.zeros(len(condition_dbm)))
dbm_false = np.maximum(self.open-self.low, self.open-delay(self.open,1))
dbm = pd.Series(np.where(condition_dbm, dbm_true, dbm_false))
condition1 = (ts_sum(dtm,20) == ts_sum(dbm,20))
inner1_true = pd.Series(np.zeros(len(condition1)))
inner1_false = (ts_sum(dtm,20)-ts_sum(dbm,20)) / ts_sum(dbm,20)
inner1 = pd.Series(np.where(condition1, inner1_true, inner1_false))
condition2 = (ts_sum(dtm,20) > ts_sum(dbm,20))
inner2_true = (ts_sum(dtm,20)-ts_sum(dbm,20)) / ts_sum(dtm,20)
inner2_false = inner1
alpha = pd.Series(np.where(condition2, inner2_true, inner2_false))
return alpha[self.start_date_index:self.end_date_index]
#Alpha70 STD(AMOUNT,6)
def GTJAalpha070(self):
alpha = stddev(self.amount, 6)
return alpha[self.start_date_index:self.end_date_index]
#Alpha71 (CLOSE-MEAN(CLOSE,24))/MEAN(CLOSE,24)*100
def GTJAalpha071(self):
alpha = (self.close-sma(self.close,24))/sma(self.close,24)*100
return alpha[self.start_date_index:self.end_date_index]
#Alpha72 SMA((TSMAX(HIGH,6)-CLOSE)/(TSMAX(HIGH,6)-TSMIN(LOW,6))*100,15,1)
def GTJAalpha072(self):
alpha = ema((ts_max(self.high,6)-self.close)/(ts_max(self.high,6)-ts_min(self.low,6))*100, 15, 1)
return alpha[self.start_date_index:self.end_date_index]
#Alpha73 ((TSRANK(DECAYLINEAR(DECAYLINEAR(CORR((CLOSE), VOLUME, 10), 16), 4), 5) - RANK(DECAYLINEAR(CORR(VWAP, MEAN(VOLUME,30), 4),3))) * -1)
def GTJAalpha073(self):
alpha = (ts_rank(decay_linear(decay_linear(correlation(self.close, self.volume, 10), 16), 4), 5) - rolling_rank(decay_linear(correlation(self.vwap, sma(self.volume,30), 4),3))) * -1
return alpha[self.start_date_index:self.end_date_index]
#Alpha74 (RANK(CORR(SUM(((LOW * 0.35) + (VWAP * 0.65)), 20), SUM(MEAN(VOLUME,40), 20), 7)) + RANK(CORR(RANK(VWAP), RANK(VOLUME), 6)))
def GTJAalpha074(self):
alpha = rolling_rank(correlation(ts_sum(((self.low * 0.35) + (self.vwap * 0.65)), 20), ts_sum(sma(self.volume,40), 20), 7)) + rolling_rank(correlation(rolling_rank(self.vwap), rolling_rank(self.volume), 6))
return alpha[self.start_date_index:self.end_date_index]
#Alpha75 COUNT(CLOSE>OPEN & BANCHMARKINDEXCLOSE<BANCHMARKINDEXOPEN,50)/COUNT(BANCHMARKINDEXCLOSE<BANCHMARKINDEXOPEN,50)
def GTJAalpha075(self):
condition_count1 = ((self.close>self.open) & (self.benchmarkclose<self.benchmarkopen))
count1 = pd.Series(np.nan, index=condition_count1.index)
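# Hedged sketch of how the rest of GTJAalpha075 could look, following the COUNT
# pattern of the formula in the comment above; the rolling-sum form and the
# benchmark condition name are assumptions, not taken from the original source:
# condition_count2 = (self.benchmarkclose < self.benchmarkopen)
# count1 = condition_count1.astype(float).rolling(50).sum()
# count2 = condition_count2.astype(float).rolling(50).sum()
# alpha = count1 / count2
# return alpha[self.start_date_index:self.end_date_index]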
# -*- coding: utf-8 -*-
import sys
import os
from pandas.io import pickle
# import pandas as pd
PROJECT_ID = "dots-stock" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
USER = "shkim01" # <---CHANGE THIS
BUCKET_NAME = "gs://pipeline-dots-stock" # @param {type:"string"}
PIPELINE_ROOT = f"{BUCKET_NAME}/pipeline_root/{USER}"
from typing import NamedTuple
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics,
component)
from kfp.v2.google.client import AIPlatformClient
@component(
# base_image='gcr.io/dots-stock/py38-pandas-cal',
base_image="gcr.io/dots-stock/python-img-v5.2"
)
def set_defaults()-> NamedTuple(
'Outputs',
[
('date_ref',str),
('n_days', int),
('period_extra', int)
]):
import pandas as pd
from trading_calendars import get_calendar
today = pd.Timestamp.now('Asia/Seoul').strftime('%Y%m%d')
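# NOTE: the next line pins the reference date, overriding the computed "today" above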
today = '20210903'
period_to_train = 20
period_extra = 100
n_days = period_to_train + period_extra
cal_KRX = get_calendar('XKRX')
def get_krx_on_dates_start_end(start, end):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(start=start,
end=end, freq='C',
holidays=cal_KRX.precomputed_holidays)
]
print(f'today : {today}')
dates_krx_on = get_krx_on_dates_start_end('20210104', today)
if today in dates_krx_on :
date_ref = today
else :
date_ref = dates_krx_on[-1]
return (date_ref, n_days, period_extra)
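# Minimal standalone sketch of the custom business-day pattern used in
# set_defaults: pd.bdate_range with freq='C' skips weekends plus an explicit
# holiday list. The holiday below is a placeholder, not a real KRX calendar entry.
def example_trading_days():
    import pandas as pd
    holidays = [pd.Timestamp("2021-03-01")]  # placeholder holiday (assumption)
    return [d.strftime('%Y%m%d')
            for d in pd.bdate_range(start="2021-02-22", end="2021-03-05",
                                    freq='C', holidays=holidays)]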
##############################
# get market info ############
##############################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
)
def get_market_info(
market_info_dataset: Output[Dataset],
date_ref: str,
n_days: int
):
import pandas as pd
import pickle
from trading_calendars import get_calendar
cal_KRX = get_calendar('XKRX')
from sqlalchemy import create_engine
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
AWS_DB_ID = 'gb_master'
AWS_DB_PWD = '<PASSWORD>'
AWS_DB_ADDRESS = 'kwdb-daily.cf6e7v8fhede.ap-northeast-2.rds.amazonaws.com'
AWS_DB_PORT = '3306'
DB_DATABASE_NAME_daily_naver = 'daily_naver'
db_daily_naver_con = create_engine('mysql+pymysql://{0}:{1}@{2}:{3}/{4}?charset=utf8'
.format(AWS_DB_ID, AWS_DB_PWD, AWS_DB_ADDRESS, AWS_DB_PORT, DB_DATABASE_NAME_daily_naver),
encoding='utf8',
echo=False)
def get_market_from_naver_aws(date_ref):
'''
Fetch the parsed daily rows as-is from the daily_naver DB.
'''
with db_daily_naver_con.connect() as conn:
table_name = f'{date_ref}_daily_allstock_naver'
str_sql = f'select * from {table_name} order by 등락률 DESC'
df = pd.read_sql_query(str_sql, conn) # self.get_db_daily_naver_con())
df = df.reset_index().rename(columns={'index':'순위_상승률', 'N':'순위_시가총액'})
df['순위_상승률'] = df.순위_상승률 + 1
return df
def get_krx_on_dates_n_days_ago(date_ref, n_days):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(
end=date_ref, freq='C', periods=n_days,
holidays=cal_KRX.precomputed_holidays) ]
def get_markets_aws(date_ref, n_days):
dates_n_days_ago = get_krx_on_dates_n_days_ago(date_ref, n_days)
df_market = pd.DataFrame()
for date in dates_n_days_ago:
df_ = get_market_from_naver_aws(date)
logger.debug(f'date : {date} and df_.shape {df_.shape}' )
df_market = df_market.append(df_)
return df_market
df_market = get_markets_aws(date_ref=date_ref, n_days=n_days)
df_market.to_pickle(market_info_dataset.path, protocol=4)
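# Note: DataFrame.append (used in get_markets_aws above) is deprecated in recent
# pandas releases. A minimal sketch of the same accumulate-then-combine pattern
# with pd.concat (the toy frames are an assumption):
def example_accumulate_frames():
    import pandas as pd
    frames = [pd.DataFrame({"x": [i]}) for i in range(3)]
    return pd.concat(frames, ignore_index=True)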
#######################
# get bros ############
#######################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2"
)
def get_bros(
date_ref: str,
n_days: int,
bros_univ_dataset: Output[Dataset]
):
import pandas as pd
import pickle
import pandas_gbq
import networkx as nx
from trading_calendars import get_calendar
cal_KRX = get_calendar('XKRX')
def get_krx_on_dates_n_days_ago(date_ref, n_days=20):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(
end=date_ref, freq='C', periods=n_days,
holidays=cal_KRX.precomputed_holidays) ]
def get_corr_pairs_gbq(date_ref, period):
date_ref_ = pd.Timestamp(date_ref).strftime('%Y-%m-%d')
sql = f'''
SELECT
DISTINCT source,
target,
corr_value,
period,
date
FROM
`dots-stock.krx_dataset.corr_ohlc_part1`
WHERE
date = "{date_ref_}"
AND period = {period}
ORDER BY
corr_value DESC
LIMIT
1000'''
PROJECT_ID = 'dots-stock'
df = pandas_gbq.read_gbq(sql, project_id=PROJECT_ID)
return df
def find_bros(date_ref, period):
'''clique over 3 nodes '''
df_edgelist = get_corr_pairs_gbq(date_ref, period)
g = nx.from_pandas_edgelist(df_edgelist, edge_attr=True)
bros_ = nx.find_cliques(g)
bros_3 = [bros for bros in bros_ if len(bros) >=3]
set_bros = set([i for l_i in bros_3 for i in l_i])
g_gang = g.subgraph(set_bros)
df_gangs_edgelist = nx.to_pandas_edgelist(g_gang)
return df_gangs_edgelist
def find_gang(date_ref):
df_gang = pd.DataFrame()
for period in [20, 40, 60, 90, 120]:
df_ = find_bros(date_ref, period=period)
df_gang = df_gang.append(df_)
return df_gang
# jobs
dates = get_krx_on_dates_n_days_ago(date_ref=date_ref, n_days=n_days)
df_bros = pd.DataFrame()
for date in dates:
df = find_gang(date_ref=date)
df_bros = df_bros.append(df)
df_bros.to_pickle(bros_univ_dataset.path, protocol=4)
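# Minimal standalone sketch of the clique-based filtering in find_bros above,
# on a toy edge list (the toy tickers and correlation values are assumptions):
def example_find_bros_toy():
    import pandas as pd
    import networkx as nx
    edges = pd.DataFrame({"source": ["A", "A", "B", "C"],
                          "target": ["B", "C", "C", "D"],
                          "corr_value": [0.9, 0.8, 0.85, 0.7]})
    g = nx.from_pandas_edgelist(edges, edge_attr=True)
    cliques3 = [c for c in nx.find_cliques(g) if len(c) >= 3]  # e.g. ['A', 'B', 'C']
    nodes = set(n for c in cliques3 for n in c)
    return nx.to_pandas_edgelist(g.subgraph(nodes))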
###############################
# get adj price############
###############################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
)
def get_adj_prices(
start_index :int,
end_index : int,
market_info_dataset: Input[Dataset],
adj_price_dataset: Output[Dataset]
):
# import json
import FinanceDataReader as fdr
from ae_module.ae_logger import ae_log
import pandas as pd
df_market = pd.read_pickle(market_info_dataset.path)
date_ref = df_market.날짜.max()
date_start = '20210101'
codes_stock = df_market[df_market.날짜 == date_ref].종목코드.to_list()
def get_price_adj(code, start, end):
return fdr.DataReader(code, start=start, end=end)
def get_price(l_univ, date_start, date_end):
df_price = pd.DataFrame()
for code in l_univ :
df_ = get_price_adj(code, date_start, date_end)
print('size', df_.shape)
df_['code'] = str(code)
df_price = df_price.append(df_)
return df_price
codes = codes_stock[ start_index : end_index ]
ae_log.debug(f'codes_stock {codes.__len__()}')
df_adj_price = get_price(codes, date_start=date_start, date_end=date_ref)
df_adj_price = df_adj_price.reset_index()
print('df_adj_cols =>', df_adj_price.columns)
df_adj_price.to_pickle(adj_price_dataset.path, protocol=4)
ae_log.debug(df_adj_price.shape)
###############################
# get full adj ############
###############################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2"
# packages_to_install=['pandas']
)
def get_full_adj_prices(
adj_price_dataset01: Input[Dataset],
adj_price_dataset02: Input[Dataset],
adj_price_dataset03: Input[Dataset],
adj_price_dataset04: Input[Dataset],
adj_price_dataset05: Input[Dataset],
full_adj_prices_dataset: Output[Dataset]
):
import pandas as pd
df_adj_price_01 = pd.read_pickle(adj_price_dataset01.path)
df_adj_price_02 = pd.read_pickle(adj_price_dataset02.path)
df_adj_price_03 = pd.read_pickle(adj_price_dataset03.path)
df_adj_price_04 = pd.read_pickle(adj_price_dataset04.path)
df_adj_price_05 = pd.read_pickle(adj_price_dataset05.path)
df_full_adj_prices = pd.concat([df_adj_price_01,
df_adj_price_02,
df_adj_price_03,
df_adj_price_04,
df_adj_price_05])
# df_full_adj_prices.to_csv(full_adj_prices_dataset.path)
df_full_adj_prices.to_pickle(full_adj_prices_dataset.path, protocol=4)
# with open(full_adj_prices_dataset.path, 'wb') as f:
# pickle.dump(df_full_adj_prices, f)
###############################
# get target ############
###############################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
# base_image="amancevice/pandas:1.3.2-slim"
)
def get_target(
df_price_dataset: Input[Dataset],
df_target_dataset: Output[Dataset]
):
import pandas as pd
import numpy as np
def make_target(df):
df_ = df.copy()
df_.sort_values(by='date', inplace=True)
df_['high_p1'] = df_.high.shift(-1)
df_['high_p2'] = df_.high.shift(-2)
df_['high_p3'] = df_.high.shift(-3)
df_['close_p1'] = df_.close.shift(-1)
df_['close_p2'] = df_.close.shift(-2)
df_['close_p3'] = df_.close.shift(-3)
df_['change_p1'] = (df_.close_p1 - df_.close) / df_.close
df_['change_p2'] = (df_.close_p2 - df_.close) / df_.close
df_['change_p3'] = (df_.close_p3 - df_.close) / df_.close
df_['change_p1_over5'] = df_['change_p1'] > 0.05
df_['change_p2_over5'] = df_['change_p2'] > 0.05
df_['change_p3_over5'] = df_['change_p3'] > 0.05
df_['change_p1_over10'] = df_['change_p1'] > 0.1
df_['change_p2_over10'] = df_['change_p2'] > 0.1
df_['change_p3_over10'] = df_['change_p3'] > 0.1
df_['close_high_1'] = (df_.high_p1 - df_.close) / df_.close
df_['close_high_2'] = (df_.high_p2 - df_.close) / df_.close
df_['close_high_3'] = (df_.high_p3 - df_.close) / df_.close
df_['close_high_1_over10'] = df_['close_high_1'] > 0.1
df_['close_high_2_over10'] = df_['close_high_2'] > 0.1
df_['close_high_3_over10'] = df_['close_high_3'] > 0.1
df_['close_high_1_over5'] = df_['close_high_1'] > 0.05
df_['close_high_2_over5'] = df_['close_high_2'] > 0.05
df_['close_high_3_over5'] = df_['close_high_3'] > 0.05
df_['target_over10'] = np.logical_or.reduce([
df_.close_high_1_over10,
df_.close_high_2_over10,
df_.close_high_3_over10])
df_['target_over5'] = np.logical_or.reduce([
df_.close_high_1_over5,
df_.close_high_2_over5,
df_.close_high_3_over5])
df_['target_close_over_10'] = np.logical_or.reduce([
df_.change_p1_over10,
df_.change_p2_over10,
df_.change_p3_over10])
df_['target_close_over_5'] = np.logical_or.reduce([
df_.change_p1_over5,
df_.change_p2_over5,
df_.change_p3_over5])
df_['target_mclass_close_over10_under5'] = \
np.where(df_['change_p1'] > 0.1,
1, np.where(df_['change_p1'] > -0.05, 0, -1))
df_['target_mclass_close_p2_over10_under5'] = \
np.where(df_['change_p2'] > 0.1,
1, np.where(df_['change_p2'] > -0.05, 0, -1))
df_['target_mclass_close_p3_over10_under5'] = \
np.where(df_['change_p3'] > 0.1,
1, np.where(df_['change_p3'] > -0.05, 0, -1))
df_.dropna(subset=['high_p3'], inplace=True)
return df_
def get_target_df(df_price):
df_price.reset_index(inplace=True)
df_price.columns = df_price.columns.str.lower()
df_target = df_price.groupby('code').apply(lambda df: make_target(df))
df_target = df_target.reset_index(drop=True)
# df_target['date'] = df_target.date.str.replace('-', '')
return df_target
# df_price = pd.read_csv(df_price_dataset.path, index_col=0)
df_price = pd.read_pickle(df_price_dataset.path)
# with open(df_price_dataset.path, 'rb') as f:
# df_price = pickle.load(f)
print('df cols =>', df_price.columns)
df_target = get_target_df(df_price=df_price)
df_target.to_pickle(df_target_dataset.path, protocol=4)
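# Minimal standalone sketch of the shift-based labelling in make_target above
# (the toy closing prices are an assumption):
def example_forward_return_target():
    import pandas as pd
    df = pd.DataFrame({"close": [100.0, 103.0, 112.0, 111.0, 110.0]})
    df["close_p1"] = df.close.shift(-1)                    # next-day close
    df["change_p1"] = (df.close_p1 - df.close) / df.close  # 1-day forward return
    df["change_p1_over5"] = df["change_p1"] > 0.05         # label: +5% move
    return df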
###############################
# get tech indicator ##########
###############################
@component(
# base_image="gcr.io/dots-stock/py38-pandas-cal",
base_image="gcr.io/dots-stock/python-img-v5.2",
packages_to_install=["stockstats", "scikit-learn"]
)
def get_tech_indi(
# date_ref: str,
df_price_dataset: Input[Dataset],
df_techini_dataset: Output[Dataset],
):
from stockstats import StockDataFrame as Sdf
# from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import maxabs_scale
import pandas as pd
import pickle
class FeatureEngineer:
"""Provides methods for preprocessing the stock price data
Attributes
----------
use_technical_indicator : boolean
use technical indicator or not
tech_indicator_list : list
a list of technical indicator names (modified from config.py)
use_turbulence : boolean
use turbulence index or not
user_defined_feature : boolean
use user defined features or not
Methods
-------
preprocess_data()
main method to do the feature engineering
"""
TECHNICAL_INDICATORS_LIST = ['macd',
'boll_ub',
'boll_lb',
'rsi_30',
'dx_30',
'close_30_sma',
'close_60_sma',
# 'mfi',
]
# PERIOD_MAX = 60,
def __init__(
self,
use_technical_indicator=True,
tech_indicator_list=TECHNICAL_INDICATORS_LIST,
user_defined_feature=False,
):
self.use_technical_indicator = use_technical_indicator
self.tech_indicator_list = tech_indicator_list
self.user_defined_feature = user_defined_feature
def preprocess_data(self, df):
"""main method to do the feature engineering
@:param config: source dataframe
@:return: a DataMatrices object
"""
#clean data
# df = self.clean_data(df)
# add technical indicators using stockstats
if self.use_technical_indicator == True:
df = self.add_technical_indicator(df)
print("Successfully added technical indicators")
# add user defined feature
if self.user_defined_feature == True:
df = self.add_user_defined_feature(df)
print("Successfully added user defined features")
# fill the missing values at the beginning and the end
df = df.fillna(method="bfill").fillna(method="ffill")
return df
def clean_data(self, data):
"""
clean the raw data
deal with missing values
reasons: stocks could be delisted, not incorporated at the time step
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df=df.sort_values(['date','tic'],ignore_index=True) ##
df.index = df.date.factorize()[0]
merged_closes = df.pivot_table(index = 'date',columns = 'tic', values = 'close')
merged_closes = merged_closes.dropna(axis=1)
tics = merged_closes.columns
df = df[df.tic.isin(tics)]
return df
def add_technical_indicator(self, data):
"""
calculate technical indicators
use stockstats package to add technical indicators
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df = df.sort_values(by=['tic','date'])
stock = Sdf.retype(df.copy())
unique_ticker = stock.tic.unique()
for indicator in self.tech_indicator_list:
indicator_df = pd.DataFrame()
for i in range(len(unique_ticker)):
try:
temp_indicator = stock[stock.tic == unique_ticker[i]][indicator]
temp_indicator = pd.DataFrame(temp_indicator)
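# Hedged standalone sketch of the stockstats pattern used in
# add_technical_indicator above: retype an OHLCV frame and read an indicator
# column (the toy prices are assumptions).
def example_stockstats_macd():
    import pandas as pd
    from stockstats import StockDataFrame as Sdf
    df = pd.DataFrame({"open": [1.0, 2.0, 3.0], "high": [2.0, 3.0, 4.0],
                       "low": [0.5, 1.5, 2.5], "close": [1.5, 2.5, 3.5],
                       "volume": [100, 120, 90]})
    stock = Sdf.retype(df)
    return stock["macd"]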
# -*- coding: utf-8 -*-
r"""
tsgettoolbox command line/library tools to retrieve time series.
This program is a collection of utilities to download data from various
web services.
"""
from __future__ import absolute_import, division, print_function
import logging
import os
import warnings
from io import BytesIO
import mando
import pandas as pd
try:
from mando.rst_text_formatter import RSTHelpFormatter as HelpFormatter
except ImportError:
from argparse import RawTextHelpFormatter as HelpFormatter
from tstoolbox import tsutils
from tsgettoolbox import utils
warnings.filterwarnings("ignore")
nwis_docstrings = {
"filter_descriptions": r"""
Detailed documentation is available at http://waterdata.usgs.gov/nwis.
Site local time is output, even if multiple sites are requested and
sites are in different time zones. Note that the measurement time
zone at a site may not be the same as the time zone actually in
effect at the site.
Every query requires a major filter. Pick the major filter
('--sites', '--stateCd', '--huc', '--bBox', '--countyCd') that best
retrieves the data for the sites that you are interested in. You
can have only one major filter per query. If you specify more than
one major filter, you will get an error.
**Major Filter**
Select ONE of::
'--sites',
'--stateCd',
'--huc',
'--bBox', or
'--countyCd'
**Minor Filters**
Additional filters can be applied after specifying a major filter.
This further reduces the set of expected results. Users are
encouraged to use minor filters because it allows more efficient use
of this service.
Use as many as desired to limit number of retrieved time series::
'--agencyCd',
'--altMax',
'--altMin',
'--aquiferCd',
'--drainAreaMax',
'--drainAreaMin',
'--holeDepthMax'
'--holeDepthMin',
'--localAquiferCd',
'--modifiedSince',
'--parameterCd',
'--siteStatus',
'--siteType',
'--wellDepthMax',
'--wellDepthMin',
""",
"results_ts": r"""
**Results**
The column name in the resulting table is made up of
"agencyCd_siteno_parameterCd", for example "USGS_02248380_00010".
The agency and parameter codes are described in the `agencyCd` and
`parameterCd` options below.
If `includeCodes` option is used, there will also be columns representing
the data quality codes named "agencyCd_siteno_parameterCd_cd".
+---------+--------------------------------------------------------+
| Code | Description |
+=========+========================================================+
| e | Value has been edited or estimated by USGS personnel |
| | and is write protected |
+---------+--------------------------------------------------------+
| & | Value was computed from affected unit values |
+---------+--------------------------------------------------------+
| E | Value was computed from estimated unit values. |
+---------+--------------------------------------------------------+
| A | Approved for publication -- Processing and review |
| | completed. |
+---------+--------------------------------------------------------+
| P | Provisional data subject to revision. |
+---------+--------------------------------------------------------+
| < | The value is known to be less than reported value and |
| | is write protected. |
+---------+--------------------------------------------------------+
| > | The value is known to be greater than reported value |
| | and is write protected. |
+---------+--------------------------------------------------------+
| 1 | Value is write protected without any remark code to be |
| | printed |
+---------+--------------------------------------------------------+
| 2 | Remark is write protected without any remark code to |
| | be printed |
+---------+--------------------------------------------------------+
| | No remark (blank) |
+---------+--------------------------------------------------------+
| Ssn | Parameter monitored seasonally |
+---------+--------------------------------------------------------+
| Ice | Ice affected |
+---------+--------------------------------------------------------+
| Pr | Partial-record site |
+---------+--------------------------------------------------------+
| Rat | Rating being developed or revised |
+---------+--------------------------------------------------------+
| Eqp | Equipment malfunction |
+---------+--------------------------------------------------------+
| Fld | Flood damage |
+---------+--------------------------------------------------------+
| Dis | Data-collection discontinued |
+---------+--------------------------------------------------------+
| Dry | Dry |
+---------+--------------------------------------------------------+
| -- | Parameter not determined |
+---------+--------------------------------------------------------+
| Mnt | Maintenance in progress |
+---------+--------------------------------------------------------+
| ZFl | Zero flow |
+---------+--------------------------------------------------------+
| ``***`` | Temporarily unavailable |
+---------+--------------------------------------------------------+
""",
"includeCodes": r"""includeCodes
[optional, default is False]
Whether or not to include the metadata/quality code column.
Useful to almost halve the size of the pandas DataFrame.""",
"sites": r"""sites : str
[optional, default is None, major site filter]
Want to only query one site? Use sites as your major filter, and
put only one site number in the list. Sites are comma
separated. Sites may be prefixed with an optional agency code
followed by a colon. If you do not know the site numbers you
need, you can find relevant sites with the NWIS Mapper
(http://wdr.water.usgs.gov/nwisgmap/index.html) or on the USGS
Water Data for the Nation site.
(http://waterdata.usgs.gov/nwis/)
Can have from 1 to 100 comma separated site numbers::
--sites=USGS:01646500
--sites=01646500,06306300""",
"stateCd": r"""stateCd : str
[optional, default is None, major site filter]
U.S. postal service (2-digit) state code. Can have only 1 state
code. List is available at
http://www.usps.com/ncsc/lookups/usps_abbreviations.html::
--stateCd=NY""",
"huc": r"""huc : str
[optional, default is None, major site filter]
A list of hydrologic unit codes (HUC) or watersheds. Only
1 major HUC can be specified per request. A major HUC has two
digits. Minor HUCs must be eight digits in length. Can have
1 to 10 HUC codes. List of HUCs is available at
http://water.usgs.gov/GIS/huc_name.html::
--huc=01,02070010""",
"bBox": r"""bBox :
[optional, default is None, major site filter]
A contiguous range of decimal latitude and longitude, starting
with the west longitude, then the south latitude, then the east
longitude, and then the north latitude with each value separated
by a comma. The product of the range of latitude and longitude
cannot exceed 25 degrees. Whole or decimal degrees must be
specified, up to six digits of precision. Minutes and seconds
are not allowed. Remember: western longitude (which includes
almost all of the United States) is specified in negative
degrees. Caution: many sites outside the continental US do not
have latitude and longitude referenced to NAD83 and therefore
can not be found using these arguments. Certain sites are not
associated with latitude and longitude due to homeland security
concerns and cannot be found using this filter.::
--bBox=-83,36.5,-81,38.5""",
"countyCd": r"""countyCd :
[optional, default is None, major site filter]
A list of county numbers, in a 5 digit numeric format. The first
two digits of a county's code are the FIPS State Code. Can have
from 1 to 20 county codes. The first 2 digits are the FIPS
State Code (http://www.itl.nist.gov/fipspubs/fip5-2.htm) and the
list of county codes are at
http://help.waterdata.usgs.gov/code/county_query?fmt=html::
--countyCd=51059,51061""",
"parameterCd": r"""parameterCd :
[optional, default is None, minor site filter]
USGS time-series parameter code. All parameter codes are
numeric and 5 characters in length. Parameter codes are used to
identify the constituent measured and the units of measure.
Popular codes include stage (00065), discharge in cubic feet per
second (00060) and water temperature in degrees Celsius (00010).
Can request from 1 to 100 "parameterCD"s. Default: returns all
regular time-series for the requested sites.
Complete list::
http://help.waterdata.usgs.gov/codes-and-parameters/parameters::
--parameterCd=00060 # discharge, cubic feet
# per second
--parameterCd=00060,00065 # discharge,
# cubic feet per second
# and gage height in
# feet""",
"siteType": r"""siteType :
[optional, default is None, minor site filter]
Restricts sites to those having one or more major and/or minor
site types. If you request a major site type (ex: &siteType=ST)
you will get all sub-site types of the same major type as well
(in this case, ST-CA, ST-DCH and ST-TS). Can have from 1 to an
unlimited number of siteType codes. Default is to return all
types. List of valid site types:
http://help.waterdata.usgs.gov/site_tp_cd::
--siteType=ST # Streams only
--siteType=ST,LA-OU # Streams and Land Outcrops only""",
"modifiedSince": r"""modifiedSince :
[optional, default is None, minor site filter]
Returns all values for sites and period of record requested only
if any values have changed over the last modifiedSince period.
modifiedSince is useful if you periodically need to poll a site
but are only interested in getting data if some of it has
changed. It is typically be used with period, or startDT/endDT
but does not have to be. In the latter case, if any values were
changed during the specified modifiedSince period, only the most
recent values would be retrieved for those sites. This is
a typical usage, since users typically are polling a site and
only want data if there are new or changed measurements.
ISO-8601 duration format is always used. There is no default.
(http://en.wikipedia.org/wiki/ISO_8601#Durations)::
--modifiedSince=PT2H
# Retrieves all values for sites and period of record
# requested for any of the requested sites and
# parameters, but only for sites where any of the
# values changed during the last two hours.
--modifiedSince=PT2H --period=P1D
# Retrieve all values for sites and period of record
# requested for the last 24 hours from now only for
# sites and parameters that had any values that
# changed or were added during the last two hours.
--modifiedSince=PT2H --startDt=2010-11-01 --endDt=2010-11-02
# Retrieve all values for sites and period of record
# requested for sites and parameters that had values
# change between midnight site local time on Nov 1st,
# 2010 and 23:59 on Nov 2nd, 2010 site local time,
# only if values were changed or added within the
# last two hours.""",
"agencyCd": r"""agencyCd :
[optional, default is None, minor site filter]
The list of sites returned are filtered to return only those
with the provided agency code. The agency code describes the
organization that maintains the site. Only one agency code is
allowed and is optional. An authoritative list of agency codes
can be found here. Default is to return all sites regardless of
agency code. List:
http://help.waterdata.usgs.gov/code/agency_cd_query?fmt=html::
--stateCd=il --agencyCd=USCE # Only US Army Corps
# of Engineers sites
# in Illinois""",
"siteStatus": r"""siteStatus :
[optional, default is None, minor site filter]
Selects sites based on whether or not they are active. If a site
is active, it implies that it is being actively maintained.
A site is considered active if: it has collected time-series
(automated) data within the last 183 days (6 months), or it has
collected discrete (manually collected) data within 397 days (13
months) If it does not meet these criteria, it is considered
inactive. Some exceptions apply. If a site is flagged by a USGS
water science center as discontinued, it will show as inactive.
A USGS science center can also flag a new site as active even if
it has not collected any data. The default is all (show both
active and inactive sites). Choose between 'all', 'active', or
'inactive'. Default all - sites of any activity status are
returned.::
--siteStatus='active'""",
"altMin": r"""altMin : float
[optional, default is None, minor site filter]
These arguments allows you to select instantaneous values sites
where the associated sites' altitude are within a desired
altitude, expressed in feet. Altitude is based on the datum
used at the site. Providing a value to altMin (minimum
altitude) means you want sites that have or exceed the altMin
value. You may specify decimal feet if precision is critical. If
both the altMin and altMax are specified, sites at or between
the minimum and maximum altitude are returned.""",
"altMax": r"""altMax : float
[optional, default is None, minor site filter]
Providing a value to altMax (maximum altitude) means you want
sites that have or are less than the altMax value.::
--altMin=1000 --altMax=5000
# Return sites where the altitude is 1000 feet or
# greater and 5000 feet or less.
--altMin=12.5 --altMax=13
# Return sites where the altitude is 12.5 feet or
# greater and 13 feet or less.""",
"drainAreaMin": r"""drainAreaMin : float
[optional, default is None, minor site filter]
SURFACE WATER SITE ATTRIBUTE
These arguments allows you to select principally surface water
sites where the associated sites' drainage areas (watersheds)
are within a desired size, expressed in square miles or decimal
fractions thereof. Providing a value to drainAreaMin (minimum
drainage area) means you want sites that have or exceed the
drainAreaMin value. The values may be expressed in decimals. If
both the drainAreaMin and drainAreaMax are specified, sites at
or between the minimum and maximum drainage areas values
specified are returned Caution: not all sites are associated
with a drainage area. Caution: drainage area generally only
applies to surface water sites. Use with other site types, such
as groundwater sites, will likely retrieve no results.""",
"drainAreaMax": r"""drainAreaMax: float
[optional, default is None, minor site filter]
SURFACE WATER SITE ATTRIBUTE
Providing a value to drainAreaMax (maximum drainage area) means
you want sites that have or are less than the drainAreaMax
value.::
--drainAreaMin=1000 --drainAreaMax=5000
# Return sites where the drainage area
# is 1000 square miles or greater and
# is 5000 square miles or less.
--drainAreaMin=10.5 --drainAreaMax=10.7
# Return sites where the drainage area
# is 10.5 square miles or greater and
# is 10.7 square miles or less.""",
"aquiferCd": r"""aquiferCd
[optional, default is None, minor site filter]
Used to filter sites to those that exist in specified national
aquifers. Note: not all sites have been associated with national
aquifers. Enter one or more national aquifer codes, separated
by commas. A national aquifer code is exactly 10 characters.
You can have up to 1000 aquiferCd codes. Complete list:
http://water.usgs.gov/ogw/NatlAqCode-reflist.html::
--aquiferCd=S500EDRTRN,N100HGHPLN
# returns groundwater sites for the
# Edwards-Trinity aquifer system and
# the High Plains national
# aquifers.""",
"localAquiferCd": r"""localAquiferCd
[optional, default is None, minor site filter]
Used to filter sites to those that exist in specified local
aquifers. Note: not all sites have been associated with local
aquifers. Enter one or more local aquifer codes, separated by
commas. A local aquifer code begins with a 2 character state
abbreviation (such as TX for Texas) followed by a colon followed
by the 7 character aquifer code. Can have 0 to 1000 comma
delimited codes. Complete list:
http://help.waterdata.usgs.gov/code/aqfr_cd_query?fmt=html
To translate state codes associated with the local aquifer you
may need this reference:
http://www.itl.nist.gov/fipspubs/fip5-2.htm ::
--localAquiferCd=AL:111RGLT,AL:111RSDM
# returns sites for the Regolith and Saprolite local
# aquifers in Alabama""",
"wellDepthMin": r"""wellDepthMin : float
[optional, default is None, minor site filter]
GROUNDWATER SITE ATTRIBUTE
These arguments allows you to select groundwater sites serving
data recorded automatically where the associated sites' well
depth are within a desired depth, expressed in feet from the
land surface datum. Express well depth as a positive number.
Providing a value to wellDepthMin (minimum well depth) means you
want sites that have or exceed the wellDepthMin value. The
values may be expressed in decimals Caution: well depth applies
to groundwater sites only.::
--wellDepthMin=100 --wellDepthMax=500
# Return daily value sites where the well depth is
# 100 feet or greater and 500 feet or less.""",
"wellDepthMax": r"""wellDepthMax : float
[optional, default is None, minor site filter]
GROUNDWATER SITE ATTRIBUTE
Providing a value to wellDepthMax (maximum well depth) means you
want sites that have or are less than the wellDepthMax value.::
--wellDepthMin=10.5 --wellDepthMax=10.7
# Return daily value sites where the well depth is
# 10.5 feet or greater and 10.7 feet or less.
If both the wellDepthMin and wellDepthMax are specified, sites
at or between the minimum and maximum well depth values
specified are returned wellDepthMax should be greater than or
equal to wellDepthMin.""",
"holeDepthMin": r"""holeDepthMin : float
[optional, default is None, minor site filter]
GROUNDWATER SITE ATTRIBUTE
These arguments allows you to select groundwater sites serving
data recorded automatically where the associated sites' hole
depth are within a desired depth, expressed in feet from the
land surface datum. Express hole depth as a positive number.
Providing a value to holeDepthMin (minimum hole depth) means you
want sites that have or exceed the holeDepthMin value. The
values may be expressed in decimals Caution: hole depth applies
to groundwater sites only.""",
"holeDepthMax": r"""holeDepthMax : float
[optional, default is None, minor site filter]
GROUNDWATER SITE ATTRIBUTE
Providing a value to holeDepthMax (maximum hole depth) means you
want sites that have or are less than the holeDepthMax value.::
--holeDepthMin=100 --holeDepthMax=500
# Return daily values sites where the hole depth is
# 100 feet or greater and 500 feet or less.
--holeDepthMin=10.5 --holeDepthMax=10.7
# Return daily value sites where the hole depth is
# 10.5 feet or greater and 10.7 feet or less.
If both the holeDepthMin and holeDepthMax are specified, sites
at or between the minimum and maximum hole depth values
specified are returned holeDepthMax should be greater than or
equal to holeDepthMin.""",
"period": r"""period
[optional, default is None]
Get a range of values from now by specifying the period argument
period must be in ISO-8601 Duration format.
(http://en.wikipedia.org/wiki/ISO_8601#Durations) Negative
periods (ex: P-T2H) are not allowed. Data are always returned
up to the most recent value, which in the case of a predictive
gage might be in the future. When specifying days from now, the
first value will probably not be at midnight of the first day,
but somewhat before exactly 24 hours from now.::
--period=PT2H
# Retrieve last two hours from now up to most recent
# instantaneous value)
--period=P7D
# Retrieve last seven days up from now to most recent
# instantaneous value)""",
"startDT": r"""startDT
[optional, default is None]
Get a range of values from an explicit begin or end date/time.
Use the startDT and endDT arguments. Site local time is output,
even if multiple sites are requested and sites are in different
time zones. Note that the measurement time zone at a site may
not be the same as the time zone actually in effect at the site.
Both startDt and endDt must be in ISO-8601 Date/Time format.
(http://en.wikipedia.org/wiki/ISO_8601#Dates) You can express
the date and time in a timezone other than site local time if
you want as long as it follows the ISO standard. For example,
you can express the time in Universal time: 2014-03-20T00:00Z.
If startDT is supplied and endDT is not, endDT ends with the
most recent instantaneous value. startDT must be chronologically
before endDT.
If startDt shows the date and not the time of day (ex:
2010-09-01) the time of midnight site time is assumed
(2010-09-01T00:00) If endDt shows the date and not the time of
day (ex: 2010-09-02) the last minute before midnight site time
is assumed (2010-09-02T23:59). Remember, only data from October
1, 2007 are currently available in the 'iv' database.""",
"endDT": r"""endDT
[optional, default is None]
If endDT is present, startDt must also be
present.::
--startDT=2010-11-22 --endDT=2010-11-22 # Full day, 00:00 to 23:59
--startDT=2010-11-22T12:00 --endDT=2010-11-22T18:00
--startDT=2010-11-22 --endDT=2010-11-22
--startDT=2010-11-22T12:00 # From "startDT" to most recent
# instantaneous value""",
"statReportType": r"""statReportType : str
[optional, default is 'daily']
The type of statistics desired. Valid statistic report types
include:
+----------------+------------------------------------------+
| statReportType | Description |
+----------------+------------------------------------------+
| daily | daily statistics (default) |
| | statistic across years |
+----------------+------------------------------------------+
| monthly | monthly statistics (monthly time-series) |
+----------------+------------------------------------------+
| annual | annual statistics, based on either |
| | calendar year or water year, as defined |
| | by statYearType. If statYearType is not |
| | provided, calendar year statistics are |
| | assumed. (annual time-series) |
+----------------+------------------------------------------+""",
"statType": r"""statType : str
[optional, default is None, minor site filter]
Selects sites based on the statistics type(s) desired, such as
minimum, maximum or mean
For all statReportType types include::
mean - arithmetic mean or average
all - selects all available statistics
For daily statistics you can also specify::
min - minimum, or smallest value found for the
daily statistics
max - maximum, or largest value found for the
daily statistics
median - the numerical value separating the higher
half of a the data from the lower half,
same as specifying P50. If used median
will be represented by the column name
p50_va.
P05, P10, P20, P25, P50, P75, P80, P90, P95
with the number indicating percentile. Note:
the service can calculate only these
percentiles.""",
"missingData": r"""missingData
[optional, default is None]
Used to indicate the rules to follow to generate statistics if
there are gaps in the period of record during the requested
statistics period. By default if there are any missing data for
the report type, the statistic is left blank or null.
This option does not apply to daily statistics, but optionally
can be used with monthly and yearly statistics. If used with
daily statistics, an error will occur.
Missing data can happen for various reasons including there was
a technical problem with the gage for part of the time period.
Enabling this switch will attempt to provide a statistic if
there is enough data to create one.
Choice is 'off' or 'on'.""",
"statisticsCd": r"""statisticsCd
[optional, default is None]
The statisticsCd represents how the instantaneous values are
aggregated. The statisticsCd is from the following table:
+-------+------------------------------------+
| Code | Description |
+=======+====================================+
| 00001 | MAXIMUM VALUES |
+-------+------------------------------------+
| 00002 | MINIMUM VALUES |
+-------+------------------------------------+
| 00003 | MEAN VALUES |
+-------+------------------------------------+
| 00004 | VALUES TAKEN BETWEEN 0001 AND 1200 |
+-------+------------------------------------+
| 00005 | VALUES TAKEN BETWEEN 1201 AND 2400 |
+-------+------------------------------------+
| 00006 | SUMMATION VALUES |
+-------+------------------------------------+
| 00007 | MODAL VALUES |
+-------+------------------------------------+
| 00008 | MEDIAN VALUES |
+-------+------------------------------------+
| 00009 | STANDARD DEVIATION VALUES |
+-------+------------------------------------+
| 00010 | VARIANCE VALUES |
+-------+------------------------------------+
| 00011 | RANDOM INSTANTANEOUS VALUES |
+-------+------------------------------------+
| 00012 | EQUIVALENT MEAN VALUES |
+-------+------------------------------------+
| 00013 | SKEWNESS VALUES |
+-------+------------------------------------+
| 00021 | TIDAL HIGH-HIGH VALUES |
+-------+------------------------------------+
| 00022 | TIDAL LOW-HIGH VALUES |
+-------+------------------------------------+
| 00023 | TIDAL HIGH-LOW VALUES |
+-------+------------------------------------+
| 00024 | TIDAL LOW-LOW VALUES |
+-------+------------------------------------+
| 01XXY | XX.Y PERCENTILE |
+-------+------------------------------------+
| 02LLL | LLL DAY LOW MEAN |
+-------+------------------------------------+
| 03HHH | HHH DAY HIGH MEAN |
+-------+------------------------------------+
| 3TTTT | INSTANTANEOUS OBSERVATION AT TTTT |
+-------+------------------------------------+""",
"siteOutput": r"""siteOutput
[optional, default is None]
If you would like to see expanded site information, check this
box. This argument is ignored for visually oriented output
formats like Mapper, Google Earth and Google Maps. The default
is basic. Use expanded to get expanded site information.
Example: &siteOutput=expanded. Note: for performance reasons,
&siteOutput=expanded cannot be used if seriesCatalogOutput=true
or with any values for outputDataTypeCd.""",
"seriesCatalogOutput": r"""seriesCatalogOutput
[optional, default is None]
This argument is ignored for visually oriented output formats
like Mapper, Google Earth and Google Maps. If you would like to
see all the period of record information for the sites selected,
check this box. You will see detailed information, such as
a continuous range of dates served by a site for one or more
data types, for example, the begin and end dates that streamflow
(parameter 00060) was recorded at a site. Note: if you select
any data types for output (see below) the period of record data
will also appear. In that case specifying this argument is
unnecessary. The default is false. The only legal values for
this argument are true and false. Example:
&seriesCatalogOutput=true.
&seriesCatalogOutput=true is equivalent to
&outputDataTypeCd=all. Note: for performance reasons,
&siteOutput=expanded cannot be used if
seriesCatalogOutput=true.""",
"outputDataTypeCd": r"""outputDataTypeCd
[optional, default is None]
This will add period of record information to certain output
formats (GML, RDB and JSON) that summarize information about the
data types requested. The default is all data types. Some
output formats are designed for visual use (Google Earth, Google
Maps and Mapper). Consequently with these formats you will not
see data type code information.
Default information: If seriesCatalogOutput is true, all period
of record information is shown by default. If
seriesCatalogOutput is false, unless you override it using one
of the values below, no period of record information is shown.
Note: for performance reasons, &siteOutput=expanded cannot be
used if with any values for outputDataTypeCd.
Here are the various output data type codes available. These can
be selected individually or can be added as comma separated
values if desired. Example: &outputDataTypeCd=iv,dv
+-----+---------------------------------------------------------------+
| all | default (see above for qualifications). This is equivalent to |
| | &seriesCatalogOutput=true. |
+-----+---------------------------------------------------------------+
| iv | Instantaneous values (time-series measurements typically |
| | recorded by automated equipment at frequent intervals (e.g., |
| | hourly) |
+-----+---------------------------------------------------------------+
| uv | Unit values (alias for iv) |
+-----+---------------------------------------------------------------+
| rt | Real-time data (alias for iv) |
+-----+---------------------------------------------------------------+
| dv | Daily values (once daily measurements or summarized |
| | information for a particular day, such as daily maximum, |
| | minimum and mean) |
+-----+---------------------------------------------------------------+
| pk | Peaks measurements of water levels and streamflow for surface |
| | water sites (such as during floods, may be either an |
| | automated or a manual measurement) |
+-----+---------------------------------------------------------------+
| sv | Site visits (irregular manual surface water measurements, |
| | excluding peak measurements) |
+-----+---------------------------------------------------------------+
| gw | Groundwater levels measured at irregular, discrete intervals. |
| | For recorded, time series groundwater levels, use iv or id. |
+-----+---------------------------------------------------------------+
| qw | Water-quality data from discrete sampling events and analyzed |
| | in the field or in a laboratory. For recorded time series |
| | water-quality data, use iv or id. |
+-----+---------------------------------------------------------------+
| id | Historical instantaneous values (sites in the USGS |
| | Instantaneous Data Archive External Link) |
+-----+---------------------------------------------------------------+
| aw | Sites monitored by the USGS Active Groundwater Level Network |
| | External Link |
+-----+---------------------------------------------------------------+
| ad | Sites included in USGS Annual Water Data Reports External |
| | Link |
+-----+---------------------------------------------------------------+""",
"siteName": r"""siteName
[optional, default is None, minor site filter]
This filter allows you to find a site by its name, using either
the exact site name or a partial site name. Note that a major
filter is still required. String matches are case insensitive,
so if you specify "Boulder" you will retrieve site names with
"Boulder", "boulder", "BOULDER" as well as many other variants.
To embed a space, you can use single quotes. Example:
--siteName='Boulder Creek'""",
"siteNameMatchOperator": r"""siteNameMatchOperator
[optional, default is None, minor site filter]
If used, this must be used with siteName. It determines how the
pattern matching for the site name behaves. Matches are case
insensitive. The options are::
start = The string must be at the start of the site name (default)
any = The string must be contained somewhere in the site name
exact = The site name must exactly match the string supplied, with
the exception that the match is not case sensitive
Example: &siteNameMatchOperator=any""",
"hasDataTypeCd": r"""hasDataTypeCd
[optional, default is None, minor site filter]
Default is all. Restricts results to those sites that collect
certain kinds of data. Separate values with commas. Allowed
values are:
+-----+---------------------------------------------------------------+
| all | default (see above for qualifications). This is equivalent to |
| | &seriesCatalogOutput=true. |
+-----+---------------------------------------------------------------+
| iv | Instantaneous values (time-series measurements typically |
| | recorded by automated equipment at frequent intervals (e.g., |
| | hourly) |
+-----+---------------------------------------------------------------+
| uv | Unit values (alias for iv) |
+-----+---------------------------------------------------------------+
| rt | Real-time data (alias for iv) |
+-----+---------------------------------------------------------------+
| dv | Daily values (once daily measurements or summarized |
| | information for a particular day, such as daily maximum, |
| | minimum and mean) |
+-----+---------------------------------------------------------------+
| pk | Peaks measurements of water levels and streamflow for surface |
| | water sites (such as during floods, may be either an |
| | automated or a manual measurement) |
+-----+---------------------------------------------------------------+
| sv | Site visits (irregular manual surface water measurements, |
| | excluding peak measurements) |
+-----+---------------------------------------------------------------+
| gw | Groundwater levels measured at irregular, discrete intervals. |
| | For recorded, time series groundwater levels, use iv or id. |
+-----+---------------------------------------------------------------+
| qw | Water-quality data from discrete sampling events and analyzed |
| | in the field or in a laboratory. For recorded time series |
| | water-quality data, use iv or id. |
+-----+---------------------------------------------------------------+
| id | Historical instantaneous values (sites in the USGS |
| | Instantaneous Data Archive External Link) |
+-----+---------------------------------------------------------------+
| aw | Sites monitored by the USGS Active Groundwater Level Network |
| | External Link |
+-----+---------------------------------------------------------------+
| ad | Sites included in USGS Annual Water Data Reports External |
| | Link |
+-----+---------------------------------------------------------------+""",
"statYearType": r"""statYearType
[optional, default is None]
Indicates which kind of year statistics should be created
against. This only applies when requesting annual statistics,
i.e. statReportType=annual. Valid year types codes include:
+----------+----------------------------------------------------------+
| calendar | calendar year, i.e. January 1 through December 31 |
+----------+----------------------------------------------------------+
| water | water year, i.e. a year begins October 1 of the previous |
| | year and ends September 30 of the current year. This is |
| | the same as a federal fiscal year. |
+----------+----------------------------------------------------------+""",
}
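# Hedged sketch of how the documented filters combine: exactly one major filter
# (here `sites`) plus optional minor filters, passed as the request parameters
# that _read_rdb below forwards to the service. The parameter values are
# illustrative assumptions.
def example_nwis_query_params():
    return {
        "format": "rdb",
        "sites": "01646500",           # major filter (pick exactly one)
        "parameterCd": "00060,00065",  # minor filter: discharge and gage height
        "startDT": "2010-11-22",
        "endDT": "2010-11-23",
    }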
_NA_VALUES = ["Dis", "Eqp", "Rat"]
# USGS
# IV
#
# agency_cd
# site_no
# datetime
# tz_cd
# 30725_00060
# 30725_00060_cd
# 196788_00065
# 196788_00065_cd
#
# DV
#
# agency_cd
# site_no
# datetime
# 68479_00010_00001
# 68479_00010_00001_cd
# 68482_00010_00001
# 68482_00010_00001_cd
#
# STAT
#
# agency_cd
# site_no
# station_nm
# site_tp_cd
# dec_lat_va
# dec_long_va
# coord_acy_cd
# dec_coord_datum_cd
# alt_va
# alt_acy_va
# alt_datum_cd
# huc_cd
#
# GWLEVELS
#
# agency_cd
# site_no
# site_tp_cd
# lev_dt
# lev_tm
# lev_tz_cd
# lev_va
# sl_lev_va
# sl_datum_cd
# lev_status_cd
# lev_agency_cd
# lev_dt_acy_cd
# lev_acy_cd
# lev_src_cd
# lev_meth_cd
# lev_age_cd
#
# STATS
#
# agency_cd
# site_no
# parameter_cd
# ts_id
# loc_web_ds
# month_nu
# day_nu
# begin_yr
# end_yr
# count_nu
# max_va_yr
# max_va
# min_va_yr
# min_va
# mean_va
# p05_va
# p10_va
# p20_va
# p25_va
# p50_va
# p75_va
# p80_va
# p90_va
# p95_va
def _read_rdb(url, data):
# parameter_cd parameter_group_nm parameter_nm casrn srsname parameter_units
pmcodes = pd.read_csv(
os.path.join(os.path.dirname(__file__), "../station_metadata/nwis_pmcodes.dat"),
comment="#",
header=0,
sep="\t",
dtype={0: str},
na_values=_NA_VALUES,
)
pmcodes.set_index("parameter_cd", inplace=True)
session = utils.requests_retry_session()
req = session.get(url, params=data)
if os.path.exists("debug_tsgettoolbox"):
logging.warning(req.url)
req.raise_for_status()
header = [0, 1]
if "/measurements/" in url:
header = [0]
if "/iv/" in url or "/dv/" in url:
# iv and dv results are stacked, a table for each site. Have to split
# the overall req.content into discrete tables for pd.read_csv to work.
list_of_sublists = []
n = 0
a_list = req.content.splitlines()
for i, elt in enumerate(a_list):
if i and elt[:9] == b"agency_cd":
list_of_sublists.append(a_list[n:i])
n = i
list_of_sublists.append(a_list[n:])
ndf = pd.DataFrame()
for site in list_of_sublists:
try:
adf = pd.read_csv(
BytesIO(b"\n".join(site)),
comment="#",
header=header,
sep="\t",
dtype={"site_no": str},
na_values=_NA_VALUES,
)
except pd.errors.EmptyDataError:
continue
adf.columns = [i[0] for i in adf.columns]
test_cnames = []
not_ts = []
for cname in adf.columns:
words = cname.split("_")
try:
_ = int(words[0])
if "cd" == words[-1]:
test_cnames.append(cname)
else:
test_cnames.append(
cname
+ ":{}".format(pmcodes.loc[words[1], "parameter_units"])
)
except ValueError:
test_cnames.append(cname)
not_ts.append(cname)
adf.columns = test_cnames
adf.set_index(not_ts, inplace=True)
if len(ndf) == 0:
ndf = adf
else:
ndf = ndf.join(adf, how="outer")
ndf.reset_index(inplace=True)
else:
ndf = pd.read_csv(
BytesIO(req.content),
comment="#",
header=header,
sep="\t",
dtype={"site_no": str, "parameter_cd": str, "ts_id": str},
na_values=_NA_VALUES,
)
ndf.columns = [i[0] for i in ndf.columns]
return ndf
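# Minimal standalone sketch of the "stacked RDB" splitting performed above for
# iv/dv responses: the payload is cut into one chunk per site wherever a new
# "agency_cd" header line starts (the toy byte payload is an assumption).
def example_split_stacked_rdb():
    payload = b"agency_cd\tsite_no\nUSGS\t01\nagency_cd\tsite_no\nUSGS\t02\n"
    a_list = payload.splitlines()
    chunks, n = [], 0
    for i, elt in enumerate(a_list):
        if i and elt[:9] == b"agency_cd":
            chunks.append(a_list[n:i])
            n = i
    chunks.append(a_list[n:])
    return chunks  # two per-site tables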
def _make_nice_names(ndf, reverse=False):
nnames = []
for col in ndf.columns.values:
strung = [str(i) for i in col]
if reverse is True:
strung = reversed(strung)
nnames.append("_".join(strung).strip())
return nnames
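# Illustration of the flattening above (the site number and units are assumed
# for the example): with reverse=True a column tuple such as
# ("30725_00060:ft3/s", "02232000", "USGS") becomes the single name
# "USGS_02232000_30725_00060:ft3/s".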
tzmap = {
"EST": "America/New_York",
"EDT": "America/New_York",
"CST": "America/Chicago",
"CDT": "America/Chicago",
"MST": "America/Denver",
"MDT": "America/Denver",
"PST": "America/Los_Angeles",
"PDT": "America/Los_Angeles",
}
def normalize_tz(row, tz_cd):
"""Assign the correct time zone to the data."""
try:
return row["Datetime"].tz_localize(tzmap[row[tz_cd]])
except KeyError:
return row["Datetime"]
def usgs_iv_dv_rdb_to_df(url, **kwargs):
"""Convert from USGS RDB type to pd.DataFrame."""
# Need to enforce RDB format
kwargs["format"] = "rdb"
kwargs["startDT"] = tsutils.parsedate(kwargs["startDT"], strftime="%Y-%m-%d")
kwargs["endDT"] = tsutils.parsedate(kwargs["endDT"], strftime="%Y-%m-%d")
includeCodes = True
if "includeCodes" in kwargs:
includeCodes = kwargs.pop("includeCodes")
ndf = _read_rdb(url, kwargs)
ndf["Datetime"] = pd.to_datetime(ndf["datetime"])
ndf.drop("datetime", axis="columns", inplace=True)
if "tz_cd" in ndf.columns:
ndf["Datetime"] = ndf.apply(normalize_tz, args=("tz_cd",), axis=1)
ndf.drop("tz_cd", axis="columns", inplace=True)
ndf.set_index(["agency_cd", "site_no", "Datetime"], inplace=True)
ndf = ndf.unstack(level=["site_no", "agency_cd"])
# Sometime in the near future figure out a better way because right now the
# ndf.unstack above can create a huge dataframe that is mostly NA.
# Workaround is to trim it down to size in next command.
ndf.dropna(axis="columns", how="all", inplace=True)
ndf.columns = _make_nice_names(ndf, reverse=True)
if includeCodes is False:
ndf.drop(
[i for i in ndf.columns if i[-3:] == "_cd"], axis="columns", inplace=True
)
return ndf
def usgs_stat_rdb_to_df(url, **kwargs):
"""Convert from USGS STAT_RDB type to pd.DataFrame."""
# set defaults.
for key, val in [
["statYearType", "calendar"],
["missingData", "off"],
["statType", "all"],
["statReportType", "daily"],
]:
try:
if kwargs[key] is None:
kwargs[key] = val
except KeyError:
kwargs[key] = val
# Need to enforce rdb format
kwargs["format"] = "rdb"
if kwargs["statReportType"] != "annual":
kwargs["statYearType"] = None
if kwargs["statReportType"] == "daily":
kwargs["missingData"] = None
ndf = _read_rdb(url, kwargs)
if kwargs["statReportType"] == "daily":
ndf["Datetime"] = [
"{:02d}-{:02d}".format(int(i), int(j))
for i, j in zip(ndf["month_nu"], ndf["day_nu"])
]
ndf.drop(["month_nu", "day_nu"], axis=1, inplace=True)
elif kwargs["statReportType"] == "monthly":
ndf["Datetime"] = pd.to_datetime(
[
"{}-{:02d}".format(i, int(j))
for i, j in zip(ndf["year_nu"], ndf["month_nu"])
]
)
ndf.drop(["year_nu", "month_nu"], axis=1, inplace=True)
else:
if kwargs["statYearType"] == "water":
ndf["Datetime"] = pd.to_datetime(
["{}-10-01".format(int(i) - 1) for i in ndf["year_nu"]]
)
else:
ndf["Datetime"] =
|
pd.to_datetime(ndf["year_nu"])
|
pandas.to_datetime
|
import os,re
import numpy as np
import pandas as pd
from collections import defaultdict
import seaborn as sns
import iplotter
from scipy.stats import linregress
from IPython.display import HTML
def read_interop_data(filepath):
"""
This function reads a dump file generated by interop_dumptext tool and returns a list of Pandas dataframe
:param filepath: A interop dumptext output path
:returns: A list of Pandas dataframes
* Tile
* Q2030
* Extraction
* Error
* EmpiricalPhasing
* CorrectedInt
* QByLane
"""
try:
if not os.path.exists(filepath):
raise IOError('File {0} not found'.format(filepath))
data = defaultdict(list)
header = None
data_header = None
with open(filepath,'r') as fp:
for line in fp:
line = line.strip()
if line.startswith('#'):
if line.startswith('# Version') or \
line.startswith('# Column Count') or \
line.startswith('# Bin Count') or \
line.startswith('# Channel Count'):
pass
else:
header=line.strip('# ').split(',')[0]
else:
if header is not None:
if 'Lane' in line.split(','):
data_header = line.split(',')
continue
if data_header is not None:
data[header].append(dict(zip(data_header,line.split(','))))
for key in ('CorrectedInt','Tile','Q2030','Extraction','EmpiricalPhasing','QByLane'):
if key not in data:
raise KeyError('No entry for {0} found in interop dump'.format(key))
error = data.get('Error')
if error is None:
error = pd.DataFrame(columns=["Lane","Tile","Cycle","ErrorRate","PhiXAdapterRate"])
else:
error = pd.DataFrame(error)
tile = pd.DataFrame(data.get('Tile'))
q2030 = pd.DataFrame(data.get('Q2030'))
extraction = pd.DataFrame(data.get('Extraction'))
correctedInt = pd.DataFrame(data.get('CorrectedInt'))
empiricalPhasing = pd.DataFrame(data.get('EmpiricalPhasing'))
qByLane = pd.DataFrame(data.get('QByLane'))
return tile,q2030,extraction,error,empiricalPhasing,correctedInt,qByLane
except Exception as e:
raise ValueError('Failed to extract data from interop dump, error:{0}'.format(e))
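# Hypothetical usage sketch (the dump file name is only an assumption):
# tile, q2030, extraction, error, phasing, corrected, qbylane = \
#     read_interop_data('interop_dumptext_output.csv')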
def read_runinfo_xml(runInfoXml_path):
"""
A function for reading RunInfo.xml file from Illumina sequencing run and returns data as Pandas DataFrame
:param runInfoXml_path: Filepath for RunInfo.xml
:returns: A Pandas dataframe containing the run configuration data
"""
try:
if not os.path.exists(runInfoXml_path):
raise IOError('File {0} not found'.format(runInfoXml_path))
pattern = re.compile(r'<Read Number=\"(\d)\" NumCycles=\"(\d+)\" IsIndexedRead=\"(Y|N)\" />')
read_info = list()
with open(runInfoXml_path,'r') as fp:
for line in fp:
line = line.strip()
if line.startswith('<Read Number'):
read_info.append(line)
read_start = 0
reads_stat = list()
for i in read_info:
if re.match(pattern,i):
read_number,numcycle,index_read = re.match(pattern,i).groups()
reads_stat.append({
'read_id':int(read_number),
'cycles':int(numcycle),
'start_cycle':int(read_start),
'index_read':index_read})
read_start += int(numcycle)
reads_stat = pd.DataFrame(reads_stat)
reads_stat['read_id'] = reads_stat['read_id'].astype(int)
return reads_stat
except Exception as e:
raise ValueError('Failed to read RunInfo.xml for sequencing run, error: {0}'.format(e))
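# Example of the parsing above: a RunInfo.xml line such as
#   <Read Number="1" NumCycles="151" IsIndexedRead="N" />
# yields the row {'read_id': 1, 'cycles': 151, 'start_cycle': 0,
# 'index_read': 'N'}, with start_cycle accumulating the cycle counts of the
# preceding reads.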
def extract_read_data_from_tileDf(tileDf):
try:
read_data = list()
for read_id,r_data in tileDf.groupby('Read'):
for lane_id,l_data in r_data.groupby('Lane'):
read_count = l_data['ClusterCount'].astype(float).sum()
read_count = int(read_count)/1000000
read_count_pf = l_data['ClusterCountPF'].astype(float).sum()
read_count_pf = int(read_count_pf)/1000000
density_count = l_data['Density'].astype(float).mean()
density_count = int(density_count)/1000
pct_cluster_count_pf = '{0:.2f}'.format(int(read_count_pf)/int(read_count))
read_data.append({
'read_id':read_id,
'lane_id':lane_id,
'density':'{:.2f}'.format(density_count),
'read_count':'{:.2f}'.format(read_count),
'read_count_pf':'{:.2f}'.format(read_count_pf),
'cluster_pf':pct_cluster_count_pf})
read_data = pd.DataFrame(read_data)
read_data['read_id'] = read_data['read_id'].astype(int)
read_data['lane_id'] = read_data['lane_id'].astype(int)
return read_data
except Exception as e:
raise ValueError('Failed to extract data from TileDf, error: {0}'.format(e))
def extract_yield_data_from_q2030Df(q2030Df,runinfoDf):
try:
yield_data = list ()
q2030Df['Lane'] = q2030Df['Lane'].astype(int)
q2030Df['Cycle'] = q2030Df['Cycle'].astype(int)
for lane_id,l_data in q2030Df.groupby('Lane'):
for read_entry in runinfoDf.to_dict(orient='records'):
read_id = read_entry.get('read_id')
start_cycle = int(read_entry.get('start_cycle'))
total_cycle = int(read_entry.get('cycles'))
finish_cycle = start_cycle + total_cycle
r_q30 = l_data[(l_data['Cycle'] > start_cycle) & (l_data['Cycle'] < finish_cycle)]['Q30'].astype(int).fillna(0).sum()
r_t = l_data[(l_data['Cycle'] > start_cycle) & (l_data['Cycle'] < finish_cycle)]['Total'].astype(int).fillna(0).sum()
if int(r_q30) > 0 and \
int(r_t) > 0:
r_pct = '{:.2f}'.format(int(r_q30)/int(r_t) * 100)
r_yield = '{:.2f}'.format(int(r_t)/1000000000)
else:
r_pct = 0
r_yield = 0
yield_data.append({
'lane_id':lane_id,
'read_id':read_id,
'q30_pct':r_pct,
'yield':r_yield
})
yield_data = pd.DataFrame(yield_data)
yield_data['read_id'] = yield_data['read_id'].astype(int)
yield_data['lane_id'] = yield_data['lane_id'].astype(int)
return yield_data
except Exception as e:
raise ValueError('Failed to extract data from q2030Df, error: {0}'.format(e))
def get_extraction_data_from_extractionDf(extractionDf,runinfoDf):
try:
extractionDf['Lane'] = extractionDf['Lane'].astype(int)
extractionDf['Cycle'] = extractionDf['Cycle'].astype(int)
extractionDf['MaxIntensity_A'] = extractionDf['MaxIntensity_A'].astype(int)
extractionDf['MaxIntensity_T'] = extractionDf['MaxIntensity_T'].astype(int)
extractionDf['MaxIntensity_G'] = extractionDf['MaxIntensity_G'].astype(int)
extractionDf['MaxIntensity_C'] = extractionDf['MaxIntensity_C'].astype(int)
extraction_data = list()
for lane_id,l_data in extractionDf.groupby('Lane'):
for read_entry in runinfoDf.to_dict(orient='records'):
read_id = read_entry.get('read_id')
start_cycle = int(read_entry.get('start_cycle')) + 1
mean_a = l_data[l_data['Cycle']==start_cycle]['MaxIntensity_A'].mean()
intensity_c1 = '{:.2f}'.format(mean_a)
if intensity_c1=='nan':
intensity_c1 = 0
extraction_data.append({
'lane_id':lane_id,
'read_id':read_id,
'intensity_c1':intensity_c1})
extraction_data = pd.DataFrame(extraction_data)
extraction_data['lane_id'] = extraction_data['lane_id'].astype(int)
extraction_data['read_id'] = extraction_data['read_id'].astype(int)
return extraction_data
except Exception as e:
raise ValueError('Failed to get data from extractionDf, error: {0}'.format(e))
def get_data_from_errorDf(errorDf,runinfoDf):
try:
errorDf['Lane'] = errorDf['Lane'].astype(int)
errorDf['Cycle'] = errorDf['Cycle'].astype(int)
errorDf['ErrorRate'] = errorDf['ErrorRate'].astype(float)
error_data = list()
for lane_id,l_data in errorDf.groupby('Lane'):
for read_entry in runinfoDf.to_dict(orient='records'):
read_id = read_entry.get('read_id')
start_cycle = int(read_entry.get('start_cycle'))
total_cycle = int(read_entry.get('cycles'))
finish_cycle = start_cycle + total_cycle
error_cycles = l_data[(l_data['Cycle'] > start_cycle) & (l_data['Cycle'] < finish_cycle)]['Cycle'].drop_duplicates().count()
error_rate = l_data[(l_data['Cycle'] > start_cycle) & (l_data['Cycle'] < finish_cycle)]['ErrorRate'].mean()
error_rate = '{0:.3f}'.format(error_rate)
if error_rate == 'nan':
error_rate=0
error_data.append({
'lane_id':lane_id,
'read_id':read_id,
'error_cycles':str(error_cycles),
'error_rate':error_rate})
if len(error_data)==0:
error_data = \
pd.DataFrame(columns=['lane_id','read_id','error_cycles','error_rate'])
else:
error_data = pd.DataFrame(error_data)  # API: pandas.DataFrame
import numpy as np
import pytest
import pandas as pd
from pandas import PeriodIndex
import pandas._testing as tm
def test_to_native_types():
index = PeriodIndex(["2017-01-01", "2017-01-02", "2017-01-03"], freq="D")
# First, with no arguments.
expected = np.array(["2017-01-01", "2017-01-02", "2017-01-03"], dtype="=U10")
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
# No NaN values, so na_rep has no effect
result = index.to_native_types(na_rep="pandas")
tm.assert_numpy_array_equal(result, expected)
# Make sure slicing works
expected = np.array(["2017-01-01", "2017-01-03"], dtype="=U10")
result = index.to_native_types([0, 2])
tm.assert_numpy_array_equal(result, expected)
# Make sure date formatting works
expected = np.array(["01-2017-01", "01-2017-02", "01-2017-03"], dtype="=U10")
result = index.to_native_types(date_format="%m-%Y-%d")
tm.assert_numpy_array_equal(result, expected)
# NULL object handling should work
index = PeriodIndex(["2017-01-01", pd.NaT, "2017-01-03"], freq="D")
expected = np.array(["2017-01-01", "NaT", "2017-01-03"], dtype=object)
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
expected = np.array(["2017-01-01", "pandas", "2017-01-03"], dtype=object)
result = index.to_native_types(na_rep="pandas")
tm.assert_numpy_array_equal(result, expected)
class TestPeriodIndexRendering:
def test_frame_repr(self):
df = pd.DataFrame({"A": [1, 2, 3]}, index=pd.date_range("2000", periods=3))
result = repr(df)
expected = " A\n2000-01-01 1\n2000-01-02 2\n2000-01-03 3"
assert result == expected
@pytest.mark.parametrize("method", ["__repr__", "__str__"])
def test_representation(self, method):
# GH#7601
idx1 = PeriodIndex([], freq="D")
idx2 = PeriodIndex(["2011-01-01"], freq="D")
idx3 = PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")
idx4 = PeriodIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
idx5 = PeriodIndex(["2011", "2012", "2013"], freq="A")
idx6 = PeriodIndex(["2011-01-01 09:00", "2012-02-01 10:00", "NaT"], freq="H")
idx7 = pd.period_range("2013Q1", periods=1, freq="Q")
idx8 = pd.period_range("2013Q1", periods=2, freq="Q")
idx9 = pd.period_range("2013Q1", periods=3, freq="Q")
idx10 = PeriodIndex(["2011-01-01", "2011-02-01"], freq="3D")
exp1 = "PeriodIndex([], dtype='period[D]', freq='D')"
exp2 = "PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"
exp3 = "PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', freq='D')"
exp4 = (
"PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')"
)
exp5 = (
"PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')"
)
exp6 = (
"PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')"
)
exp7 = "PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', freq='Q-DEC')"
exp8 = "PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', freq='Q-DEC')"
exp9 = (
"PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')"
)
exp10 = (
"PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')"
)
for idx, expected in zip(
[idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9, exp10],
):
result = getattr(idx, method)()
assert result == expected
def test_representation_to_series(self):
# GH#10971
idx1 = PeriodIndex([], freq="D")
idx2 = PeriodIndex(["2011-01-01"], freq="D")
idx3 = PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")
idx4 = PeriodIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
idx5 = PeriodIndex(["2011", "2012", "2013"], freq="A")
idx6 = PeriodIndex(["2011-01-01 09:00", "2012-02-01 10:00", "NaT"], freq="H")
idx7 = pd.period_range("2013Q1", periods=1, freq="Q")
idx8 = pd.period_range("2013Q1", periods=2, freq="Q")
idx9 = pd.period_range("2013Q1", periods=3, freq="Q")
exp1 = """Series([], dtype: period[D])"""
exp2 = """0 2011-01-01
dtype: period[D]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: period[D]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: period[D]"""
exp5 = """0 2011
1 2012
2 2013
dtype: period[A-DEC]"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: period[H]"""
exp7 = """0 2013Q1
dtype: period[Q-DEC]"""
exp8 = """0 2013Q1
1 2013Q2
dtype: period[Q-DEC]"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: period[Q-DEC]"""
for idx, expected in zip(
[idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9],
):
result = repr(pd.Series(idx))
assert result == expected
def test_summary(self):
# GH#9116
idx1 = PeriodIndex([], freq="D")
idx2 = PeriodIndex(["2011-01-01"], freq="D")
idx3 = PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")
idx4 = PeriodIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
idx5 = PeriodIndex(["2011", "2012", "2013"], freq="A")
idx6 = PeriodIndex(["2011-01-01 09:00", "2012-02-01 10:00", "NaT"], freq="H")
idx7 = pd.period_range("2013Q1", periods=1, freq="Q")
idx8 = pd.period_range("2013Q1", periods=2, freq="Q")  # API: pandas.period_range
# -*- coding: utf-8 -*-
"""
@author: LeeZChuan
"""
import pandas as pd
import numpy as np
import requests
import os
from pandas.core.frame import DataFrame
import json
import datetime
import time
pd.set_option('display.max_columns',1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth',1000)
def addressProcess(address):
result = address
if '镇' in address:
item = address.split('镇')
result = item[0]+'镇'
elif '农场' in address:
item = address.split('农场')
result = item[0]+'农场'
elif '街道' in address:
item = address.split('街道')
result = item[0]+'街道'
elif '路' in address:
item = address.split('路')
result = item[0]+'路'
elif '大道' in address:
item = address.split('大道')
result = item[0]+'大道'
elif '街' in address:
item = address.split('街')
result = item[0]+'街'
elif '村' in address:
item = address.split('村')
result = item[0]+'村'
return result
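# Illustrative example (the address itself is made up): an address like
# 'XX镇YY村' is truncated at the first matching keyword and returned as
# 'XX镇'; addresses containing none of the keywords are returned unchanged.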
def processJson(filepath):
orderNum = 0  # number of orders
with open(filepath, 'r', encoding="utf-8") as f:
# read every line; each line comes back as a single string
i = 0
for jsonstr in f.readlines():
list_address = []
list_name = []
jsonstr = jsonstr[1:-1]
# listValue = jsonstr.split(']];,')
listValue = jsonstr.split(']],')
for listitem in listValue:
listitem = listitem[1:]
listCon = listitem.split(',[')
listAddr = listCon[3][:-1].split(',')
if len(listAddr) == 2 and '海南省海口市' in listAddr[0] and '海南省海口市' in listAddr[1]:
list_address_each = []
startAdd = addressProcess(listAddr[0][6:])
endAdd = addressProcess(listAddr[1][6:])
if startAdd != endAdd:
list_address_each.append(startAdd)
list_address_each.append(endAdd)
list_address.append(list_address_each)
list_name.append(startAdd)
list_name.append(endAdd)
pd_list_address = pd.DataFrame(list_name)
# print (pd_list_address)
name_list_count = pd.value_counts(pd_list_address[0], sort=False)
name_df = pd_list_address[0].unique()
name_list = name_df.tolist()
name_list_all = [[name, name_list_count[name]] for name in name_list if name_list_count[name] > 300]
name_list_new = []
for item in name_list_all:
name_list_new.append(item[0])
print (name_list_new)
new_list_address = []
for item in list_address:
if item[0] in name_list_new and item[1] in name_list_new:
new_list = []
new_list.append(item[0])
new_list.append(item[1])
new_list_address.append(new_list)
orderNum += 1
return orderNum, list_address
def save(filename, contents):
fh = open(filename, 'w', encoding='utf-8')
fh.write(contents)
fh.close()
def dataSta(list_address, txtname):
raw_file_df = pd.DataFrame(list_address)
raw_file_df.dropna(axis=0, how='any', inplace=True)  # drop rows that contain null values
result = raw_file_df.groupby([raw_file_df[0],raw_file_df[1]])
all_result = []
name_result = []
for name, item in result:
each_result = []
each_result.append(name[0])
each_result.append(name[1])
each_result.append(len(item))
all_result.append(each_result)
name_result.append(name[0])
name_result.append(name[1])
name_df = DataFrame(name_result)
name_list_count = pd.value_counts(name_df[0], sort=False)
name_df = name_df[0].unique()
name_list = name_df.tolist()
name_list_all = [[name, name_list_count[name]] for name in name_list]
print (name_list_all)
strValue = "{\"nodes\": [\n"
for item in name_list_all:
strValue = strValue+" {\"name\":\""+item[0] +"\",\n \"value\":"+str(item[1])+" \n },\n"
strValue = strValue[:-2]
strValue = strValue + "\n ],\n"
strValue = strValue + "\"links\": [\n"
for item in all_result:
strValue = strValue+" {\"source\":\""+item[0]+"\", \"target\":\""+item[1]+"\", \"value\":"+str(item[2])+"\n },\n"
strValue = strValue[:-2]
strValue = strValue + "\n ]\n}"
name_path = os.getcwd()+'\dataForMulberryFigure\\'+txtname+'_nodes_links.json'
save(name_path, strValue)
def hexiantu(list_address, txtname):
raw_file_df = pd.DataFrame(list_address)
raw_file_df.dropna(axis=0, how='any', inplace=True)  # drop rows that contain null values
result = raw_file_df.groupby([raw_file_df[0],raw_file_df[1]])
all_result = []
for name, item in result:
each_result = []
each_result.append(name[0])
each_result.append(name[1])
each_result.append(len(item))
all_result.append(each_result)
strValue = ''
strValue = strValue + "{\"value\": [\n"
for item in all_result:
strValue = strValue+" [\""+item[0]+"\", \""+item[1]+"\", "+str(item[2])+"],\n"
strValue = strValue[:-2]
strValue = strValue + "\n ]}"
name_path = os.getcwd()+'\dataForMulberryFigure\\'+txtname+'_hexiantu.json'
save(name_path, strValue)
def read_csv(filepath):
# raw_train_df = pd.read_csv(fileInfo, sep='\s+', engine='python').loc[:,[name_title+'arrive_time',name_title+'starting_lng',name_title+'starting_lat',name_title+'dest_lng',name_title+'dest_lat']]
raw_train_df = pd.read_csv(filepath, sep=',', engine='python').loc[:,['order_id','product_id','type','combo_type','traffic_type','passenger_count', 'driver_product_id', 'start_dest_distance', 'arrive_time', 'departure_time', 'pre_total_fee', 'normal_time', 'bubble_trace_id', 'product_1level', 'year', 'month', 'day', 'starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']]
return raw_train_df
def orderNumByHour(filepath, txtname):
raw_train_df = read_csv(filepath)
raw_train_df['hour'] = [pd.to_datetime(item).hour for item in raw_train_df['departure_time']]
result = ''
result_distance = '[\n'
groupedByHour = raw_train_df.groupby(['hour'])
for group_name, group_data in groupedByHour:
result = result+str(group_name)+','+str(group_data.shape[0])+'\n'
result_distance = result_distance +' [\n \"'+str(group_name)+'\",\n '+str(group_data.shape[0])+',\n '+str(int(group_data['passenger_count'].mean())/1000)+'\n ],\n'
result_order = result_distance[:-2] + '\n]'
name_path = os.getcwd()+'\lineChart\\'+txtname+'_lineChart.json'
save(name_path, result_order)
def save2(filepath, filename, contents):
if not os.path.exists(filepath):
os.mkdir(filepath)
path = filepath + '\\' + filename
fh = open(path, 'w', encoding='utf-8')
fh.write(contents)
fh.close()
def averagenum(num):
nsum = 0
for i in range(len(num)):
nsum += num[i]
return nsum / len(num)
def grade_mode(list):
'''
Compute the mode(s).
Args:
list: list of values to analyse
Returns:
grade_mode: list holding one or more modes of the input data
'''
list_set = set(list)  # take the set of the list to drop duplicate elements
frequency_dict = {}
for i in list_set:  # count how many times each distinct element appears
frequency_dict[i] = list.count(i)
grade_mode = []
for key, value in frequency_dict.items():  # keep every key whose count equals the maximum
if value == max(frequency_dict.values()):
grade_mode.append(key)
return grade_mode
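# Usage sketch: grade_mode([1, 2, 2, 3]) returns [2]; for ties such as
# [1, 1, 2, 2] both modes are returned (iteration follows set order, so the
# ordering of the result is not guaranteed).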
def thermodynamicByHour(filepath, txtname):
raw_train_df = read_csv(filepath)
raw_train_df['hour'] = [pd.to_datetime(item).hour for item in raw_train_df['departure_time']]
list_count_start = []
list_count_end = []
groupedByHour = raw_train_df.groupby(['hour'])
for group_name, group_data in groupedByHour:
print ('processing data for hour:', group_name)
result = '[\n'
groupByLocation = group_data.groupby([group_data['starting_lng'],group_data['starting_lat']])
for group_name2, group_data2 in groupByLocation:
list_count_start.append(len(group_data2))
if group_name2[0] > 100 and group_name2[1] < 40:
result = result + ' {\n \"lng\": ' + str(group_name2[0]) + ',\n \"lat\": ' + str(group_name2[1]) + ',\n \"count\": ' + str(len(group_data2)) + '\n },\n'
result = result[:-2] + '\n]'
result2 = '[\n'
groupByLocation2 = group_data.groupby([group_data['dest_lng'],group_data['dest_lat']])
for group_name3, group_data3 in groupByLocation2:
list_count_end.append(len(group_data3))
if group_name3[0] > 100 and group_name3[1] < 40:
result2 = result2 + ' {\n \"lng\": ' + str(group_name3[0]) + ',\n \"lat\": ' + str(group_name3[1]) + ',\n \"count\": ' + str(len(group_data3)) + '\n },\n'
result2 = result2[:-2] + '\n]'
txt_start = txtname+'_start'
txt_dest = txtname+'_dest'
path_start = os.getcwd()+'\dataForMulberryFigure\\'+txt_start
path_dest = os.getcwd()+'\dataForMulberryFigure\\'+txt_dest
name = str(group_name)+'.json'
save2(path_start, name, result)
save2(path_dest, name, result2)
def get_week_day(date):
week_day_dict = {
0 : '星期一',
1 : '星期二',
2 : '星期三',
3 : '星期四',
4 : '星期五',
5 : '星期六',
6 : '星期天',
}
day = date.weekday()
return week_day_dict[day]
def strGetAve(str1, str2):
return ((int(str1)+int(str2))/2)
def calendarHeatMap(foldername):
weatherPath = 'weather_05.xlsx'
weather_df = pd.DataFrame(pd.read_excel(weatherPath))
weather_df = weather_df.loc[:,['日期','天气状况','气温','holiday']]
weather_df['最高温度'] = [item[:2] for item in weather_df['气温']]
weather_df['最低温度'] = [item[-3:-1] for item in weather_df['气温']]
weather_df['平均温度'] = [strGetAve(item[:2],item[-3:-1]) for item in weather_df['气温']]
weather_df['周几'] = [get_week_day(st) for st in weather_df['日期']]
filelist=os.listdir('datasets')
dayLists = []
i = 0
for item in filelist:
dayList = []
dayList.append(item[:-4])
filename = 'datasets/' + item
raw_train_df = read_csv(filename)
dayList.append(raw_train_df.shape[0])
dayList.append(weather_df['天气状况'][i])
dayList.append(weather_df['周几'][i])
dayList.append(weather_df['最高温度'][i])
dayList.append(weather_df['最低温度'][i])
dayList.append(weather_df['平均温度'][i])
dayList.append(weather_df['holiday'][i])
i += 1
dayLists.append(dayList)
result = '[\n'
for item in dayLists:
print ('dealing--------:' + str(item[0]))
if str(item[7]) == '0':
result = result + ' [\n \"' + str(item[0]) +'\",\n ' + str(item[1]) + ',\n \"' + str(item[2]) + '\",\n \"' + str(item[3]) + '\",\n \"' + str(item[4]) + '\",\n \"' + str(item[5]) + '\",\n \"' + str(item[6]) + '\",\n \"' + '\"\n ],\n'
else:
result = result + ' [\n \"' + str(item[0]) +'\",\n ' + str(item[1]) + ',\n \"' + str(item[2]) + '\",\n \"' + str(item[3]) + '\",\n \"' + str(item[4]) + '\",\n \"' + str(item[5]) + '\",\n \"' + str(item[6]) + '\",\n \"' + str(item[7]) + '\"\n ],\n'
file = open('calendarHeatMap.json','w', encoding="utf-8")
file.write(result[:-2]+'\n]')
file.close()
def readTxt(filename):
pos = []
with open(filename, 'r', encoding='utf-8') as file_to_read:
while True:
lines = file_to_read.readline() # read one full line of data
if not lines:
break
pass
p_tmp = [i for i in lines.split(',')] # split the full line; pass no separator for whitespace-delimited data, or ',' for comma-delimited data
pos.append(p_tmp) # append the newly read row
pass
return pos
def RealtimeStatistics(foldername):
filelist=os.listdir('datasets')
realtimeStati = []
for item in filelist:
print ('dealing>>>>>', item)
dayList = []
dayList.append(item[:-4])
filename = 'datasets/' + item
pos = readTxt(filename)
pos = pos[1:]
pos = DataFrame(pos)
pos = pos.drop([1], axis=1)
pos.columns = ['order_id','product_id','type','combo_type','traffic_type','passenger_count', 'driver_product_id', 'start_dest_distance', 'arrive_time', 'departure_time', 'pre_total_fee', 'normal_time', 'bubble_trace_id', 'product_1level', 'year', 'month', 'day', 'starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']
pos['passenger_count'] = [float(item)/1000 for item in pos['passenger_count']]
pos['normal_time'] = ['0' if str(item) == '' else item for item in pos['normal_time']]
pos['changtu'] = [1 if item > 30 or item == 30 else 0 for item in pos['passenger_count']]
result1 = np.round(pos['changtu'].sum()/(pos['passenger_count'].shape[0])*100,3)
pos['kuaiche'] = [1 if str(item) == '3.0' else 0 for item in pos['product_1level']]
result2 = np.round(pos['kuaiche'].sum()/(pos['kuaiche'].shape[0])*100,3)
pos['gaojia'] = [1 if int(float(item)) > 60 or int(float(item)) == 60 else 0 for item in pos['pre_total_fee']]
result3 = np.round(pos['gaojia'].sum()/(pos['pre_total_fee'].shape[0])*100,3)
pos['changshi'] = [1 if int(float(item)) > 60 or int(float(item)) == 60 else 0 for item in pos['normal_time']]
result4 = np.round(pos['changshi'].sum()/(pos['normal_time'].shape[0])*100,3)
print (item[:-4], str(result1)+'%', str(result2)+'%', str(result3)+'%', str(result4)+'%')
dayList.append(str(result1)+'%')
dayList.append(str(result2)+'%')
dayList.append(str(result3)+'%')
dayList.append(str(result4)+'%')
realtimeStati.append(dayList)
file = open('RealtimeStatistics.json','w', encoding="utf-8")
file.write(str(realtimeStati))
file.close()
def normalization2(data):
_range = np.max(abs(data))
return np.round(data / _range, 4)
def normalization(data):
_range = np.max(data) - np.min(data)
return (data - np.min(data)) / _range
def standardization(data):
mu = np.mean(data, axis=0)
sigma = np.std(data, axis=0)
return (data - mu) / sigma
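# Quick sanity examples for the helpers above:
#   normalization2(np.array([-2, 1, 4]))  -> array([-0.5 ,  0.25,  1.  ])
#   normalization(np.array([0, 5, 10]))   -> array([0. , 0.5, 1. ])
#   standardization(...) centers the data to zero mean and unit variance.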
def Histogrammap(foldername):
filelist=os.listdir('datasets')
for item in filelist:
print ('dealing>>>>>', item)
dayList = []
dayList.append(item[:-4])
savefile = item[:-4]
filename = 'datasets/' + item
pos = readTxt(filename)
pos = pos[1:]
pos = DataFrame(pos)  # API: pandas.core.frame.DataFrame
"""Create All The Names.
Merges all the various name sources together into one big CSV.
"""
import os
import pandas as pd
def build_data_sources() -> None:
"""Execute all the data sourcing scripts.
Output expected to be in data/ folder
"""
location = "src/data_collection"
data_source_generators = os.listdir(location)
data_source_generators = [x for x in data_source_generators if x.endswith(".py")]
for source_generator in data_source_generators:
print(f"Executing: {source_generator}")
try:
os.system(f"python {location}/{source_generator}")
except: # noqa
print(f"FAILED: {source_generator}")
def read_csv(base_path: str, name: str, sep: str = "|") -> pd.DataFrame:
"""Basic wrapper on pandas read_csv method.
Args:
base_path (str): Path to the file
name (str): file name & extension to read
sep (str, optional): CSV delimiter. Defaults to "|".
Returns:
pd.DataFrame: The read in data
"""
read_path = f"{base_path}/{name}"
return pd.read_csv(read_path, sep=sep, encoding="utf-8")
def merge_data_sources(data_location: str = "data/") -> None:
"""Takes all name data files & merges+cleans into a single file.
Args:
data_location (str, optional): Directory containing all files to merge
- Defaults to "data/".
"""
# Identify all generated files
names_data_files = os.listdir(data_location)
names_data_files = [x for x in names_data_files if x.endswith(".csv")]
print(f"Found {len(names_data_files)} name source files to merge")
# Read & Merge
names_data = [read_csv("data", name) for name in names_data_files]
names_df = pd.concat(names_data)  # API: pandas.concat
import numpy as np
import pandas as pd
import re
import pickle
import scipy.sparse
import nltk
nltk.download('stopwords')
nltk.download('wordnet')  # the WordNetLemmatizer used below needs the wordnet corpus
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import html
from nltk.stem import WordNetLemmatizer
from bs4 import BeautifulSoup
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module='bs4')
lemma = WordNetLemmatizer()
def html_preprocessing(text):
soup = BeautifulSoup(text)
for script in soup(["script","style"]):
script.extract()
text = soup.get_text()
return text
def preprocess(text):
#=re.sub("<script[^>]*>(.*?)<\/?script>"," ",str(text))
#text = re.sub('<[^>]*>',' ',text)
text = html_preprocessing(text)
text=re.sub('[^a-zA-Z0-9-]',' ',text)
text=re.sub(r"\b[nbrt]\b",' ',text)
text = ' '.join([lemma.lemmatize(word) for word in text.split()])
return text
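# Rough example (exact output depends on the installed WordNet data):
# preprocess('<b>3 dogs</b> and cats') is expected to come out roughly as
# '3 dog and cat' -- tags stripped, punctuation replaced by spaces, tokens
# lemmatized.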
data = pd.read_pickle('crawler.pk1')  # API: pandas.read_pickle
from .busSim.manager import managerFactory
from .result.searchResult import SearchResult
from .util import gen_start_time, transform
from .gtfs_edit import copy_with_edits
from .service.yelp import get_results
from .census import Census
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Polygon
from shapely.wkt import loads
from pyproj import Transformer
from zipfile import ZipFile
from io import TextIOWrapper
import os
from pathlib import Path
from math import ceil, floor
from collections import defaultdict
import time
class SCanalyzer:
def __init__(self, gtfs_path):
self.gtfs_path = gtfs_path
self.orig_gtfs_path = gtfs_path
self.base_out_path = self._get_out_path()
self.out_path = self.base_out_path
self._preprocess_gtfs()
def gtfs_edit(self, edit_fn, route, from_orig=True):
orig_gtfs_name = os.path.basename(self.orig_gtfs_path)
modified_gtfs_name = f"{edit_fn.__name__}-{route}-{orig_gtfs_name}"
modified_gtfs_path = os.path.join(
self.base_out_path, modified_gtfs_name)
from_path = self.orig_gtfs_path if from_orig else self.gtfs_path
copy_with_edits(from_path, modified_gtfs_path, edit_fn, route)
self.gtfs_path = modified_gtfs_path
def set_batch_label(self, label):
self.out_path = os.path.join(self.base_out_path, label)
Path(self.out_path).mkdir(parents=True, exist_ok=True)
def reset_batch_label(self):
self.out_path = self.base_out_path
def search(self, config, perf_df=None):
# prerun check
if not config.is_runnable():
raise Exception("The current config is not runnable")
# dynamically init a manager
manager = managerFactory.create(
config.get_run_env(), gtfs_path=self.gtfs_path, out_path=self.out_path, borders=self.borders)
result_df = manager.run_batch(config, perf_df)
return result_df
def load_census(self, cache=True):
"""
Looks for a stops.csv file in data/mmt_gtfs, queries TigerWeb Census API to pull out census tracts
based on the center and radius of the system. An optional addition of 1km (default) is added to the radius.
From the tracts, and a default set of demographs the ACS 5-year 2019 dataset is queried to get the demographics
data for each tract. A few statistics are computed. It returns a geodataframe with all of this information and
saves it to the output folder.
cache default=True, if true will load a saved result and return
"""
# Pull from Cache and return:
cache_path = os.path.join(self.base_out_path, "census.csv")
if cache and os.path.exists(cache_path):
census_df = pd.read_csv(cache_path)  # API: pandas.read_csv
from datetime import datetime
import warnings
import pytest
import pandas as pd
import pyodbc
from mssql_dataframe.connect import connect
from mssql_dataframe.core import custom_warnings, conversion, create
pd.options.mode.chained_assignment = "raise"
class package:
def __init__(self, connection):
self.connection = connection.connection
self.create = create.create(self.connection)
self.create_meta = create.create(self.connection, include_metadata_timestamps=True)
@pytest.fixture(scope="module")
def sql():
db = connect(database="tempdb", server="localhost")
yield package(db)
db.connection.close()
@pytest.fixture(scope="module")
def sample():
dataframe = pd.DataFrame(
{
"_varchar": [None, "b", "c", "4", "e"],
"_tinyint": [None, 2, 3, 4, 5],
"_smallint": [256, 2, 6, 4, 5], # tinyint max is 255
"_int": [32768, 2, 3, 4, 5], # smallint max is 32,767
"_bigint": [2147483648, 2, 3, None, 5], # int max size is 2,147,483,647
"_float": [1.111111, 2, 3, 4, 5], # any decicmal places
"_time": [str(datetime.now().time())]
* 5, # string in format HH:MM:SS.ffffff
"_datetime": [datetime.now()] * 4 + [pd.NaT],
"_empty": [None] * 5,
}
)
return dataframe
def test_table_errors(sql):
table_name = "##test_table_column"
with pytest.raises(KeyError):
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns, primary_key_column="Z")
def test_table_column(sql):
table_name = "##test_table_column"
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 1
assert all(schema.index == "A")
assert all(schema["sql_type"] == "varchar")
assert all(schema["is_nullable"] == True)
assert all(schema["ss_is_identity"] == False)
assert all(schema["pk_seq"].isna())
assert all(schema["pk_name"].isna())
assert all(schema["pandas_type"] == "string")
assert all(schema["odbc_type"] == pyodbc.SQL_VARCHAR)
assert all(schema["odbc_size"] == 0)
assert all(schema["odbc_precision"] == 0)
def test_table_pk(sql):
table_name = "##test_table_pk"
columns = {"A": "TINYINT", "B": "VARCHAR(100)", "C": "FLOAT"}
primary_key_column = "A"
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 3
assert all(schema.index == ["A", "B", "C"])
assert all(schema["sql_type"] == ["tinyint", "varchar", "float"])
assert all(schema["is_nullable"] == [False, False, True])
assert all(schema["ss_is_identity"] == False)
assert schema["pk_seq"].equals(
pd.Series([1, pd.NA, pd.NA], index=["A", "B", "C"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, True, True])
assert all(schema["pandas_type"] == ["UInt8", "string", "float64"])
assert all(
schema["odbc_type"]
== [pyodbc.SQL_TINYINT, pyodbc.SQL_VARCHAR, pyodbc.SQL_FLOAT]
)
assert all(schema["odbc_size"] == [1, 0, 8])
assert all(schema["odbc_precision"] == [0, 0, 53])
def test_table_composite_pk(sql):
table_name = "##test_table_composite_pk"
columns = {"A": "TINYINT", "B": "VARCHAR(5)", "C": "FLOAT"}
primary_key_column = ["A", "B"]
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 3
assert all(schema.index == ["A", "B", "C"])
assert all(schema["sql_type"] == ["tinyint", "varchar", "float"])
assert all(schema["is_nullable"] == [False, False, True])
assert all(schema["ss_is_identity"] == False)
assert schema["pk_seq"].equals(
pd.Series([1, 2, pd.NA], index=["A", "B", "C"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, False, True])
assert all(schema["pandas_type"] == ["UInt8", "string", "float64"])
assert all(
schema["odbc_type"]
== [pyodbc.SQL_TINYINT, pyodbc.SQL_VARCHAR, pyodbc.SQL_FLOAT]
)
assert all(schema["odbc_size"] == [1, 0, 8])
assert all(schema["odbc_precision"] == [0, 0, 53])
def test_table_pk_input_error(sql):
with pytest.raises(ValueError):
table_name = "##test_table_pk_input_error"
columns = {"A": "TINYINT", "B": "VARCHAR(100)", "C": "DECIMAL(5,2)"}
primary_key_column = "A"
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
sql_primary_key=True,
)
def test_table_sqlpk(sql):
table_name = "##test_table_sqlpk"
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns, sql_primary_key=True)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 2
assert all(schema.index == ["_pk", "A"])
assert all(schema["sql_type"] == ["int identity", "varchar"])
assert all(schema["is_nullable"] == [False, True])
assert all(schema["ss_is_identity"] == [True, False])
assert schema["pk_seq"].equals(
pd.Series([1, pd.NA], index=["_pk", "A"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, True])
assert all(schema["pandas_type"] == ["Int32", "string"])
assert all(schema["odbc_type"] == [pyodbc.SQL_INTEGER, pyodbc.SQL_VARCHAR])
assert all(schema["odbc_size"] == [4, 0])
assert all(schema["odbc_precision"] == [0, 0])
def test_table_from_dataframe_simple(sql):
table_name = "##test_table_from_dataframe_simple"
dataframe = pd.DataFrame({"ColumnA": [1]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(table_name, dataframe)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 1
assert all(schema.index == "ColumnA")
assert all(schema["sql_type"] == "tinyint")
assert all(schema["is_nullable"] == False)
assert all(schema["ss_is_identity"] == False)
assert all(schema["pk_seq"].isna())
assert all(schema["pk_name"].isna())
assert all(schema["pandas_type"] == "UInt8")
assert all(schema["odbc_type"] == pyodbc.SQL_TINYINT)
assert all(schema["odbc_size"] == 1)
assert all(schema["odbc_precision"] == 0)
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result.equals(dataframe)
def test_table_from_dataframe_datestr(sql):
table_name = "##test_table_from_dataframe_datestr"
dataframe = pd.DataFrame({"ColumnA": ["06/22/2021"]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create_meta.table_from_dataframe(table_name, dataframe)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame({
'column_name': pd.Series(['ColumnA','_time_insert']),
'sql_type': pd.Series(['date','datetime2'], dtype='string'),
'is_nullable': pd.Series([False, True]),
'ss_is_identity': pd.Series([False, False]),
'pk_seq': pd.Series([None, None], dtype='Int64'),
'pk_name': pd.Series([None, None], dtype='string'),
'pandas_type': pd.Series(['datetime64[ns]', 'datetime64[ns]'], dtype='string'),
'odbc_type': pd.Series([pyodbc.SQL_TYPE_DATE, pyodbc.SQL_TYPE_TIMESTAMP], dtype='int64'),
'odbc_size': pd.Series([10, 27], dtype='int64'),
'odbc_precision': pd.Series([0, 7], dtype='int64'),
}).set_index(keys='column_name')
assert schema[expected.columns].equals(expected)
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result[dataframe.columns].equals(dataframe)
def test_table_from_dataframe_errorpk(sql, sample):
with pytest.raises(ValueError):
table_name = "##test_table_from_dataframe_nopk"
sql.create.table_from_dataframe(table_name, sample, primary_key="ColumnName")
def test_table_from_dataframe_nopk(sql, sample):
table_name = "##test_table_from_dataframe_nopk"
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(
table_name, sample.copy(), primary_key=None
)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame(
{
"column_name": pd.Series(
[
"_varchar",
"_tinyint",
"_smallint",
"_int",
"_bigint",
"_float",
"_time",
"_datetime",
"_empty",
],
dtype="string",
),
"sql_type": pd.Series(
[
"varchar",
"tinyint",
"smallint",
"int",
"bigint",
"float",
"time",
"datetime2",
"nvarchar",
],
dtype="string",
),
"is_nullable": pd.Series(
[True, True, False, False, True, False, False, True, True], dtype="bool"
),
"ss_is_identity": pd.Series([False] * 9, dtype="bool"),
"pk_seq": pd.Series([pd.NA] * 9, dtype="Int64"),
"pk_name": pd.Series([pd.NA] * 9, dtype="string"),
"pandas_type": pd.Series(
[
"string",
"UInt8",
"Int16",
"Int32",
"Int64",
"float64",
"timedelta64[ns]",
"datetime64[ns]",
"string",
],
dtype="string",
),
"odbc_type": pd.Series(
[
pyodbc.SQL_VARCHAR,
pyodbc.SQL_TINYINT,
pyodbc.SQL_SMALLINT,
pyodbc.SQL_INTEGER,
pyodbc.SQL_BIGINT,
pyodbc.SQL_FLOAT,
pyodbc.SQL_SS_TIME2,
pyodbc.SQL_TYPE_TIMESTAMP,
pyodbc.SQL_WVARCHAR,
],
dtype="int64",
),
"odbc_size": pd.Series([0, 1, 2, 4, 8, 8, 16, 27, 0], dtype="int64"),
"odbc_precision": pd.Series([0, 0, 0, 0, 0, 53, 7, 7, 0], dtype="int64"),
}
).set_index(keys="column_name")
assert schema[expected.columns].equals(expected.loc[schema.index])
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result[dataframe.columns].equals(dataframe)
def test_table_from_dataframe_sqlpk(sql, sample):
table_name = "##test_table_from_dataframe_sqlpk"
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(
table_name, sample.copy(), primary_key="sql"
)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame(
{
"column_name": pd.Series(
[
"_pk",
"_varchar",
"_tinyint",
"_smallint",
"_int",
"_bigint",
"_float",
"_time",
"_datetime",
"_empty",
],
dtype="string",
),
"sql_type": pd.Series(
[
"int identity",
"varchar",
"tinyint",
"smallint",
"int",
"bigint",
"float",
"time",
"datetime2",
"nvarchar",
],
dtype="string",
),
"is_nullable": pd.Series(
[False, True, True, False, False, True, False, False, True, True],
dtype="bool",
),
"ss_is_identity": pd.Series([True] + [False] * 9, dtype="bool"),
"pk_seq": pd.Series([1] + [pd.NA] * 9, dtype="Int64"),
"pandas_type": pd.Series(
[
"Int32",
"string",
"UInt8",
"Int16",
"Int32",
"Int64",
"float64",
"timedelta64[ns]",
"datetime64[ns]",
"string",
],
dtype="string",
),
"odbc_type": pd.Series(
[
pyodbc.SQL_INTEGER,
pyodbc.SQL_VARCHAR,
pyodbc.SQL_TINYINT,
pyodbc.SQL_SMALLINT,
pyodbc.SQL_INTEGER,
pyodbc.SQL_BIGINT,
pyodbc.SQL_FLOAT,
pyodbc.SQL_SS_TIME2,
pyodbc.SQL_TYPE_TIMESTAMP,
pyodbc.SQL_WVARCHAR,
],
dtype="int64",
),
"odbc_size": pd.Series([4, 0, 1, 2, 4, 8, 8, 16, 27, 0], dtype="int64"),
"odbc_precision": pd.Series([0, 0, 0, 0, 0, 0, 53, 7, 7, 0], dtype="int64"),
}
).set_index(keys="column_name")
assert schema[expected.columns].equals(expected.loc[schema.index])
assert pd.notna(schema.at["_pk", "pk_name"])
assert schema.loc[schema.index != "_pk", "pk_name"].isna().all()
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
result = result.reset_index(drop=True)
assert result[dataframe.columns].equals(dataframe)
def test_table_from_dataframe_indexpk_unnamed(sql, sample):
table_name = "##test_table_from_dataframe_indexpk_unnamed"
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(
table_name, sample.copy(), primary_key="index"
)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame(
{
"column_name": pd.Series(
[
"_index",
"_varchar",
"_tinyint",
"_smallint",
"_int",
"_bigint",
"_float",
"_time",
"_datetime",
"_empty",
],
dtype="string",
),
"sql_type": pd.Series(
[
"tinyint",
"varchar",
"tinyint",
"smallint",
"int",
"bigint",
"float",
"time",
"datetime2",
"nvarchar",
],
dtype="string",
),
"is_nullable": pd.Series(
[False, True, True, False, False, True, False, False, True, True],
dtype="bool",
),
"ss_is_identity": pd.Series([False] * 10, dtype="bool"),
"pk_seq": pd.Series([1] + [pd.NA] * 9, dtype="Int64"),
"pandas_type": pd.Series(
[
"UInt8",
"string",
"UInt8",
"Int16",
"Int32",
"Int64",
"float64",
"timedelta64[ns]",
"datetime64[ns]",
"string",
],
dtype="string",
),
"odbc_type": pd.Series(
[
pyodbc.SQL_TINYINT,
pyodbc.SQL_VARCHAR,
pyodbc.SQL_TINYINT,
pyodbc.SQL_SMALLINT,
pyodbc.SQL_INTEGER,
pyodbc.SQL_BIGINT,
pyodbc.SQL_FLOAT,
pyodbc.SQL_SS_TIME2,
pyodbc.SQL_TYPE_TIMESTAMP,
pyodbc.SQL_WVARCHAR,
],
dtype="int64",
),
"odbc_size": pd.Series([1, 0, 1, 2, 4, 8, 8, 16, 27, 0], dtype="int64"),
"odbc_precision": pd.Series([0, 0, 0, 0, 0, 0, 53, 7, 7, 0], dtype="int64"),
}
).set_index(keys="column_name")
assert schema[expected.columns].equals(expected.loc[schema.index])
assert pd.notna(schema.at["_index", "pk_name"])
assert schema.loc[schema.index != "_index", "pk_name"].isna().all()
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result[dataframe.columns].equals(dataframe)
def test_table_from_dataframe_indexpk_named(sql, sample):
table_name = "##test_table_from_dataframe_indexpk_named"
sample.index.name = "NamedIndex"
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(
table_name, sample.copy(), primary_key="index"
)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame(
{
"column_name": pd.Series(
[
"NamedIndex",
"_varchar",
"_tinyint",
"_smallint",
"_int",
"_bigint",
"_float",
"_time",
"_datetime",
"_empty",
],
dtype="string",
),
"sql_type": pd.Series(
[
"tinyint",
"varchar",
"tinyint",
"smallint",
"int",
"bigint",
"float",
"time",
"datetime2",
"nvarchar",
],
dtype="string",
),
"is_nullable": pd.Series(
[False, True, True, False, False, True, False, False, True, True],
dtype="bool",
),
"ss_is_identity":
|
pd.Series([False] * 10, dtype="bool")
|
pandas.Series
|
'''
This code will clean the OB datasets and combine all the cleaned data into one
Dataset name: O-42-<NAME>
1. all plug load type of data
'''
import os
import glob
import string
import datetime
import pandas as pd
# specify the path
data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-42-Andrew Sonta/'
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-42-Andrew Sonta/_yapan_processing/'
# read templates into pandas
template_appliance = pd.read_csv(template_path + 'Appliance_Usage.csv')
''' read the data into pandas '''
combined_df = pd.read_csv(data_path + 'rwc-clean-trimmed.csv')
combined_df = combined_df.rename(columns={combined_df.columns[0]: 'Date_Time'})
combined_df = combined_df[combined_df['Date_Time'].notnull()]
plug_list = list(combined_df.columns[1:]) # get the list of all the plug names
# concat all the columns
plug_temp_df = pd.DataFrame()  # API: pandas.DataFrame
import re
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from matplotlib import dates
_time_ = 'time'
_max_ = 'max resp. time. (ms)'
_min_ = 'min resp. time (ms)'
_avg_ = 'avg resp. time (ms)'
_sum_ = 'req/s'
_err_ = 'errors'
def load_rawdata(filepath, label):
data = {_time_: [], label + ' ' + _sum_: [],label + ' ' + _avg_: [], label + ' ' + _min_: [], label + ' ' + _max_: [], label + ' ' + _err_: []}
with open(filepath) as f:
#o.a.j.r.Summariser
pattern = '^(?P<time>2020.*) INFO (o\.a\.j\.r\.Summariser: summary [=].+ =\s+)(?P<summary>\d+\.\d+/s)( Avg:\s+)(?P<avg>\d+)( Min:\s+)(?P<min>\d+)( Max:\s+)(?P<max>\d+)( Err:\s+)\d+\s\((?P<err>\d+\.\d+)'
for row in f:
search = re.findall(pattern, row)
if search:
search = search[0]
data[_time_].append(search[0].replace(',','.'))
data[label + ' ' + _sum_].append(float(search[2].split('/')[0]))
data[label + ' ' + _avg_].append(int(search[4]))
data[label + ' ' + _min_].append(int(search[6]))
data[label + ' ' + _max_].append(int(search[8]))
data[label + ' ' + _err_].append(float(search[10]))
df = pd.DataFrame(data)
df['time'] = pd.to_datetime(df['time'], format='%Y-%m-%d %H:%M:%S.%f')  # API: pandas.to_datetime
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Cleaner de CSVs a partir de reglas de limpieza.
La clase DataCleaner permite limpiar archivos CSVs con datos a partir de la
aplicación de reglas de limpieza.
"""
import pandas as pd
import geopandas as gpd
import pycrs
from dateutil import tz
import arrow
import parsley
from unidecode import unidecode
import unicodecsv
import cchardet
import warnings
import inspect
import re
import os
import json  # json.loads/json.dumps are used below in _set_json_geometry
import subprocess
from functools import partial
from .fingerprint_keyer import group_fingerprint_strings
from .fingerprint_keyer import get_best_replacements, replace_by_key
from .capitalizer import capitalize
from .georef_api import *
class DuplicatedField(ValueError):
"""Salta cuando hay un campo duplicado en el dataset."""
def __init__(self, value):
"""Crea mensaje de error."""
msg = "El campo '{}' está duplicado. Campos duplicados no permitidos."
super(DuplicatedField, self).__init__(msg)
class DataCleaner(object):
"""Crea un objeto DataCleaner cargando un CSV en un DataFrame y expone
reglas de limpieza para operar sobre las columnas del objeto y retornar un
CSV limplio."""
OUTPUT_ENCODING = str("utf-8")
OUTPUT_SEPARATOR = str(",")
OUTPUT_QUOTECHAR = str('"')
INPUT_DEFAULT_ENCODING = str("utf-8")
INPUT_DEFAULT_SEPARATOR = str(",")
INPUT_DEFAULT_QUOTECHAR = str('"')
DEFAULT_SUFIX = "normalizado"
def __init__(self, input_path, ignore_dups=False, **kwargs):
"""Carga datos a limpiar en un DataFrame, normalizando sus columnas.
Args:
input_path (str): Ruta al archivo que se va a limpiar.
ignore_dups (bool): Ignora los duplicados en colunas
kwargs: Todos los argumentos que puede tomar `pandas.read_csv`
"""
default_args = {
'encoding': self._get_file_encoding(input_path),
'sep': self.INPUT_DEFAULT_SEPARATOR,
'quotechar': self.INPUT_DEFAULT_QUOTECHAR
}
default_args.update(kwargs)
# check that no field name is duplicated
if not ignore_dups and input_path.endswith('.csv'):
self._assert_no_duplicates(input_path,
encoding=default_args['encoding'],
sep=default_args['sep'],
quotechar=default_args['quotechar'])
# read the SHP file to clean
if input_path.endswith('.shp'):
self.df = gpd.read_file(
input_path,
encoding=default_args['encoding']
)
# read the projection from the .prj file, if possible
try:
projection_path = input_path.replace('.shp', '.prj')
self.source_crs = pycrs.loader.from_file(
projection_path).to_proj4()
except Exception as e:
print(e)
self.source_crs = self.df.crs
# read the CSV file to clean
elif input_path.endswith('.csv'):
self.df = pd.read_csv(
input_path, dtype=str, **default_args)
# read the XLSX file to clean
elif input_path.endswith('.xlsx'):
self.df = pd.read_excel(input_path, engine="xlrd", **default_args)
else:
raise Exception(
"{} no es un formato soportado.".format(
input_path.split(".")[-1]))
# automatic cleanup
# normalize the field names
self.df.columns = self._normalize_fields(self.df.columns)
# remove every line break
if len(self.df) > 0:
self.df = self.df.applymap(self._remove_line_breaks)
# cache compiled PEGs to optimize performance
self.grammars = {}
def _assert_no_duplicates(self, input_path, encoding, sep, quotechar):
if input_path.endswith('.csv'):
with open(input_path, 'rb') as csvfile:
reader = unicodecsv.reader(csvfile,
encoding=encoding,
delimiter=sep,
quotechar=quotechar)
fields = next(reader, [])
for col in fields:
if fields.count(col) > 1:
raise DuplicatedField(col)
# TODO: implement the duplicate-field check for XLSX files
elif input_path.endswith('.xlsx'):
pass
def _get_file_encoding(self, file_path):
"""Detecta la codificación de un archivo con cierto nivel de confianza
y devuelve esta codificación o el valor por defecto.
Args:
file_path (str): Ruta del archivo.
Returns:
str: Codificación del archivo.
"""
with open(file_path, 'rb') as f:
info = cchardet.detect(f.read())
return (info['encoding'] if info['confidence'] > 0.75
else self.INPUT_DEFAULT_ENCODING)
def _normalize_fields(self, fields):
return [self._normalize_field(field) for field in fields]
def _normalize_field(self, field, sep="_"):
"""Normaliza un string para ser nombre de campo o sufijo de dataset.
Args:
field (str): Nombre original del campo o sufijo de datset.
sep (str): Separador para el nombre normalizado.
Returns:
str: Nombre de campo o sufijo de datset normalizado.
"""
if not isinstance(field, str):
field = str(field)
# transliterate non-ASCII characters
norm_field = unidecode(field).strip()
norm_field = norm_field.replace(" ", sep)
norm_field = norm_field.replace("-", sep).replace("_", sep)
norm_field = norm_field.replace("/", sep)
norm_field = self._camel_convert(norm_field).lower()
# remove characters that are not alphanumeric or "_"
norm_field = ''.join(char for char in norm_field
if char.isalnum() or char == "_")
# emit a Warning if the field had to be normalized
if field != norm_field:
caller_rule = self._get_normalize_field_caller(
inspect.currentframe())
msg = """
El campo "{}" no sigue las convenciones para escribir
campos (sólo se admiten caracteres alfanuméricos ASCII en
minúsculas, con palabras separadas por "{}"). DataCleaner
normaliza automáticamente los campos en estos casos, lo
que puede llevar a resultados inesperados.
El nuevo nombre del campo normalizado es: "{}".
Método que llamó al normalizador de campos: {}
""".format(field, sep, norm_field, caller_rule)
warnings.warn(msg)
return norm_field
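# For example, _normalize_field("Nombre Localidad/Barrio") returns
# "nombre_localidad_barrio": accents are transliterated, spaces and
# separators collapse to "_", and everything is lowercased.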
@staticmethod
def _camel_convert(name):
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name)
@staticmethod
def _get_normalize_field_caller(curframe):
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
if calframe[2][3] != "_normalize_fields":
caller_rule = calframe[2][3]
else:
caller_rule = calframe[3][3]
return caller_rule
@staticmethod
def _remove_line_breaks(value, replace_char=" "):
if isinstance(value, str):
return str(value).replace('\n', replace_char)
else:
return value
# GLOBAL methods
def clean(self, rules):
"""Aplica las reglas de limpieza al objeto en memoria.
Args:
rules (list): Lista de reglas de limpieza.
"""
for rule_item in rules:
for rule in rule_item:
rule_method = getattr(self, rule)
for kwargs in rule_item[rule]:
kwargs["inplace"] = True
rule_method(**kwargs)
def clean_file(self, rules, output_path):
"""Aplica las reglas de limpieza y guarda los datos en un csv.
Args:
rules (list): Lista de reglas de limpieza.
"""
self.clean(rules)
self.save(output_path)
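# Hypothetical rules sketch (the field names are assumptions): each rule maps
# a cleaning method name to a list of kwargs dicts, e.g.
#   rules = [
#       {"nombre_propio": [{"field": "localidad"}]},
#       {"remover_columnas": [{"field": "columna_temporal"}]},
#   ]
#   DataCleaner("entrada.csv").clean_file(rules, "salida.csv")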
def save(self, output_path, geometry_name='geojson',
geometry_crs='epsg:4326'):
"""Guarda los datos en un nuevo CSV con formato estándar.
El CSV se guarda codificado en UTF-8, separado con "," y usando '"'
comillas dobles como caracter de enclosing."""
if isinstance(self.df, gpd.GeoDataFrame):
# Convert the projection, if possible.
if geometry_crs:
try:
self.df.crs = self.source_crs
self.df = self.df.to_crs({'init': geometry_crs})
except Exception as e:
print(e)
print("Se procede sin re-proyectar las coordenadas.")
if output_path.endswith('.csv'):
self._set_json_geometry(geometry_name)
# Save the file in GeoJSON or KML format.
if output_path.endswith('json'):  # Accepts .json and .geojson.
self.df.to_file(output_path, driver='GeoJSON')
return
elif output_path.endswith('kml'):
self._save_to_kml(output_path)
return
self.df.set_index(self.df.columns[0]).to_csv(
output_path, encoding=self.OUTPUT_ENCODING,
sep=self.OUTPUT_SEPARATOR,
quotechar=self.OUTPUT_QUOTECHAR)
def _save_to_kml(self, output_path):
aux_file = output_path + '.json'
self.df.to_file(aux_file, driver='GeoJSON')
command = 'ogr2ogr -f KML {} {}'.format(output_path, aux_file)
subprocess.call(command, shell=True)
os.remove(aux_file)
def _set_json_geometry(self, geometry_name):
"""Transforma la geometría del GeoDataFrame a formato JSON."""
geojson = self.df.geometry.to_json()
features = json.loads(geojson)['features']
geometries = [feature['geometry'] for feature in features]
        # Convert each geometry into a valid JSON string.
self.df[geometry_name] = [json.dumps(geometry)
for geometry in geometries]
del self.df['geometry']
def _update_series(self, field, new_series,
keep_original=False, prefix=None, sufix=None):
"""Agrega o pisa una serie nueva en el DataFrame."""
if not keep_original:
self.df[field] = new_series
else:
new_field = "_".join([elem for elem in [prefix, field, sufix]
if elem])
self.df.insert(self.df.columns.get_loc(field),
new_field, new_series)
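        # Illustrative note (not in the original source): with
        # keep_original=True, prefix=None and sufix="normalizado"
        # (hypothetical values), a cleaned version of the column
        # "provincia" would be inserted as "provincia_normalizado"
        # immediately before the original column.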
    # INDIVIDUAL CLEANING methods
def remover_columnas(self, field, inplace=False):
"""Remueve columnas.
Args:
field (str): Campo a limpiar
Returns:
pandas.DataFrame: Data frame con las columnas removidas.
"""
field = self._normalize_field(field)
if field not in self.df.columns:
warnings.warn("No existe el campo '{}'".format(field))
return self.df
removed_df = self.df.drop(field, axis=1)
if inplace:
self.df = removed_df
return removed_df
def remover_filas_duplicadas(self, all_fields=True, fields=None, inplace=False):
"""Remueve filas duplicadas.
Args:
all_fields (bool): Si es true, se usan todas las columnas y se ignora el argumento fields
fields (list): Lista de nombres de columnas a ser usadas para identificar filas duplicadas
inplace (bool): Específica si la limpieza perdura en el objeto.
Returns:
pandas.DataFrame: Data frame con las columnas removidas.
"""
if all_fields:
removed_df = self.df.drop_duplicates().reset_index(drop=True)
else:
removed_df = self.df.drop_duplicates(
subset=fields).reset_index(drop=True)
if inplace:
self.df = removed_df
return removed_df
def renombrar_columnas(self, field, new_field, inplace=False):
"""Renombra una columna.
Args:
field (str): Campo a renombrar.
field (str): Nuevo nombre
Returns:
pandas.DataFrame: Data frame con las columnas renombradas.
"""
field = self._normalize_field(field)
new_field = self._normalize_field(new_field)
renamed_df = self.df.rename(columns={field: new_field})
if inplace:
self.df = renamed_df
return renamed_df
def nombre_propio(self, field, sufix=None, lower_words=None,
keep_original=False, inplace=False):
"""Regla para todos los nombres propios.
Capitaliza los nombres de países, ciudades, personas, instituciones y
similares.
Args:
field (str): Campo a limpiar
Returns:
pandas.Series: Serie de strings limpios
"""
sufix = sufix or self.DEFAULT_SUFIX
field = self._normalize_field(field)
series = self.df[field]
capitalized = series.apply(capitalize, lower_words=lower_words)
if inplace:
self._update_series(field=field, sufix=sufix,
keep_original=keep_original,
new_series=capitalized)
return capitalized
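        # Usage sketch (illustrative; the field name and lower_words values
        # are only examples):
        #
        #     cleaner.nombre_propio("localidad", lower_words=["de", "del", "la"])
        #
        # lower_words is forwarded to the external `capitalize` helper, which
        # presumably keeps those connector words in lowercase.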
def string(self, field, sufix=None, sort_tokens=False,
remove_duplicates=False, keep_original=False, inplace=False):
"""Regla para todos los strings.
Aplica un algoritimo de clustering para normalizar strings que son
demasiado parecidos, sin pérdida de información.
Args:
field (str): Campo a limpiar.
Returns:
pandas.Series: Serie de strings limpios.
"""
sufix = sufix or self.DEFAULT_SUFIX
field = self._normalize_field(field)
series = self.df[field]
clusters, counts = group_fingerprint_strings(
series, sort_tokens=sort_tokens,
remove_duplicates=remove_duplicates)
replacements = get_best_replacements(clusters, counts)
parsed_series = pd.Series(replace_by_key(replacements, series))
parsed_series = parsed_series.str.strip()
if inplace:
self._update_series(field=field, sufix=sufix,
keep_original=keep_original,
new_series=parsed_series)
return parsed_series
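        # Illustrative note (not in the original source): this is an
        # OpenRefine-style "fingerprint" clustering, so values such as
        # "Buenos Aires", " buenos aires" and "BUENOS AIRES" would typically
        # be grouped into one cluster and replaced by a single variant chosen
        # by get_best_replacements() from the cluster counts.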
def mail_format(self, field, sufix=None,
keep_original=False, inplace=False):
"""Regla para dar formato a las direcciones de correo electronico.
Lleva todas las cadenas a minusculas y luego si hay varias las separa
por comas.
Args:
field (str): Campo a limpiar
Returns:
pandas.Series: Serie de strings limpios
"""
sufix = sufix or self.DEFAULT_SUFIX
field = self._normalize_field(field)
series = self.df[field].str.lower()
        series = series.str.findall(r'[a-z_0-9\.]+@[a-z_0-9\.]+').str.join(", ")
if inplace:
self._update_series(field=field, sufix=sufix,
keep_original=keep_original,
new_series=series)
return series
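        # Illustrative example (not in the original source): a cell containing
        # "Juan.Perez@Example.COM; otra@dominio.org" becomes
        # "juan.perez@example.com, otra@dominio.org" after the lowercasing,
        # regex extraction and ", " join above.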
def reemplazar(self, field, replacements, sufix=None,
keep_original=False, inplace=False):
"""Reemplaza listas de valores por un nuevo valor.
Args:
field (str): Campo a limpiar
replacements (dict): {"new_value": ["old_value1", "old_value2"]}
Returns:
pandas.Series: Serie de strings limpios
"""
sufix = sufix or self.DEFAULT_SUFIX
field = self._normalize_field(field)
series = self.df[field]
for new_value, old_values in replacements.items():
series = series.replace(old_values, new_value)
if inplace:
self._update_series(field=field, sufix=sufix,
keep_original=keep_original,
new_series=series)
return series
def reemplazar_string(self, field, replacements, sufix=None,
keep_original=False, inplace=False):
"""Reemplaza listas de strings por un nuevo string.
A diferencias de la funcion reemplazar hace reemplazos parciales.
Args:
field (str): Campo a limpiar
replacements (dict): {"new_value": ["old_value1", "old_value2"]}
Returns:
pandas.Series: Serie de strings limpios
"""
sufix = sufix or self.DEFAULT_SUFIX
field = self._normalize_field(field)
series = self.df[field]
for new_value, old_values in replacements.items():
# for old_value in sorted(old_values, key=len, reverse=True):
for old_value in old_values:
replace_function = partial(self._safe_replace,
old_value=old_value,
new_value=new_value)
                series = series.map(replace_function)
if inplace:
self._update_series(field=field, sufix=sufix,
keep_original=keep_original,
new_series=series)
return series
@staticmethod
def _safe_replace(string, old_value, new_value):
if
|
pd.isnull(string)
|
pandas.isnull
|
from django.shortcuts import render
from django.views.generic import TemplateView
import pandas as pd
from .utils import clean_html
from form_submissions.models import FormResponse
from typeforms.models import Typeform
class DashboardView(TemplateView):
template_name = 'dashboard.html'
def get(self, request, typeform_uid):
typeform = Typeform.objects.get(uid=typeform_uid)
questions = typeform.payload['questions']
df_questions = pd.DataFrame(questions)
form_responses = FormResponse.objects.filter(typeform=typeform)
answers = [each.answers for each in form_responses if each.answers]
df_answers = pd.DataFrame(answers)
question_groups = list(filter(
lambda x: type(x) == str, df_questions.group.unique()
))
charts = []
for each_question_group in question_groups:
chart = {'data': []}
chart['id'] = each_question_group
chart['label'] = df_questions[
df_questions['id'] == each_question_group
]['question'].iloc[0] + '...'
questions_each_group = df_questions[
df_questions['group'] == each_question_group
][['id', 'question']]
question_list = zip(
questions_each_group.id,
questions_each_group.question
)
for idx, question in question_list:
chart['data'].append(
[
clean_html(question.replace('\xa0', '')),
df_answers[idx].mean()
]
)
charts.append(chart)
chart = {'data': []}
chart['id'] = 'group_others'
chart['label'] = 'Others'
questions_in_no_group = df_questions[
df_questions['group'].isnull() &
df_questions['id'].str.contains('rating_')
][['id', 'question']]
question_list = zip(
questions_in_no_group.id,
questions_in_no_group.question
)
for idx, question in question_list:
chart['data'].append(
[
clean_html(question.replace('\xa0', '')),
df_answers[idx].mean()
]
)
charts.append(chart)
return render(
request,
self.template_name,
{
'charts': charts
}
)
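        # Illustrative note (not in the original source): each entry appended
        # to `charts` above has the shape
        #     {'id': <group id>, 'label': <first question text + '...'>,
        #      'data': [[<cleaned question text>, <mean rating>], ...]}
        # which is the structure the dashboard.html template is expected to
        # consume.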
class DashboardBranchView(TemplateView):
template_name = 'dashboard_branch.html'
def get(self, request, typeform_uid):
typeform = Typeform.objects.get(uid=typeform_uid)
questions = typeform.payload['questions']
df_questions =
|
pd.DataFrame(questions)
|
pandas.DataFrame
|
import datetime
from urllib.parse import urlparse
import numpy as np
import pandas as pd
import pytest
from visions.application.summaries import CompleteSummary
from visions.types import (
URL,
Boolean,
Categorical,
Complex,
DateTime,
Float,
Geometry,
Integer,
Object,
String,
)
@pytest.fixture(scope="class")
def summary():
return CompleteSummary()
def validate_summary_output(test_series, visions_type, correct_output, summary):
trial_output = summary.summarize_series(test_series, visions_type)
for metric, result in correct_output.items():
assert metric in trial_output, "Metric `{metric}` is missing".format(
metric=metric
)
if isinstance(trial_output[metric], pd.Series):
trial_output[metric] = trial_output[metric].to_dict()
assert (
trial_output[metric] == result
), "Expected value {result} for metric `{metric}`, got {output}".format(
result=result, metric=metric, output=trial_output[metric]
)
def test_integer_summary(summary, visions_type=Integer):
test_series = pd.Series([0, 1, 2, 3, 4])
correct_output = {
"n_unique": 5,
"mean": 2,
"median": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 5,
"n_zeros": 1,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_integer_missing_summary(summary, visions_type=Integer):
test_series = pd.Series([0, 1, 2, 3, 4])
correct_output = {
"n_unique": 5,
"mean": 2,
"median": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 5,
"n_zeros": 1,
"na_count": 0,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_float_missing_summary(summary, visions_type=Float):
test_series =
|
pd.Series([0.0, 1.0, 2.0, 3.0, 4.0, np.nan])
|
pandas.Series
|
import operator
import collections
import pandas as pd
import sqlalchemy as sa
from . import db
from . import utils
from . import coercion
from . import base
from . import generic
from . import ops_mixin
def row_to_query(index, data):
if not pd.api.types.is_list_like(index):
index = [index]
else:
index = list(index)
if not pd.api.types.is_list_like(data):
data = [data]
else:
data = list(data)
return sa.select([sa.literal(v) for v in index + data])
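# Illustrative example (not in the original source): row_to_query(0, [1.5, 'a'])
# builds a SELECT of literals equivalent to "SELECT 0, 1.5, 'a'", which is how
# from_pandas() below turns each DataFrame row into one branch of a UNION ALL.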
def dataframe_op(op, name=None, before=None, after=None):
def op_func(self, other, axis="columns", level=None, fill_value=None):
df = self if before is None else before(self)
result = df._op(op,
other,
axis=axis,
level=level,
fill_value=fill_value)
return result if after is None else after(result)
def rop_func(self, other, axis="columns", level=None, fill_value=None):
df = self if before is None else before(self)
result = df._op(op,
other,
axis=axis,
level=level,
fill_value=fill_value,
reverse=True)
return result if after is None else after(result)
if name is None:
name = op.__name__
op_func.__name__ = name
rop_func.__name__ = "r" + name
return op_func, rop_func
def dataframe_cmp(op, name=None, before=None, after=None):
def cmp_func(self, other, axis="columns", level=None):
df = self if before is None else before(self)
result = df._op(op, other, axis=axis, level=level)
return result if after is None else after(result)
cmp_func.__name__ = op.__name__ if name is None else name
return cmp_func
def series_op(op, name=None, before=None, after=None):
def op_func(self, other, level=None, fill_value=None, axis=0):
seq = self if before is None else before(self)
result = seq._op(op,
other,
level=level,
fill_value=fill_value,
axis=axis)
return result if after is None else after(result)
def rop_func(self, other, level=None, fill_value=None, axis=0):
seq = self if before is None else before(self)
result = seq._op(op,
other,
level=level,
fill_value=fill_value,
axis=axis,
reverse=True)
return result if after is None else after(result)
if name is None:
name = op.__name__
op_func.__name__ = name
rop_func.__name__ = "r" + name
return op_func, rop_func
def series_cmp(op, name=None, before=None, after=None):
def cmp_func(self, other, level=None, axis=0):
seq = self if before is None else before(self)
result = seq._op(op, other, level=level, axis=axis)
return result if after is None else after(result)
cmp_func.__name__ = op.__name__ if name is None else name
return cmp_func
class DataFrame(base.BaseFrame, generic.GenericMixin, ops_mixin.OpsMixin):
ndim = 2
_AXIS_MAPPER = utils.merge(base.BaseFrame._AXIS_MAPPER, {
1: 1,
"columns": 1
})
def __getattr__(self, name):
try:
col = self.__dict__["_columns"].get_loc(name)
return self._seq_at(col)
except KeyError:
return self.__getattribute__(name)
def _seq_at(self, i, name=None):
""" Return the Series corresponding to column i. """
if name is None:
name = self._columns[i]
query = sa.select(self._idx() + [self._col_at(i)])
return Series(self._index, pd.Index([name]), query.cte(), name)
@property
def columns(self):
return self._columns
def iterrows(self):
for row in self._fetch():
idx = row[:len(self._index)] if self._is_mindex else row[0]
data = pd.Series(row[len(self._index):], index=self._columns)
yield idx, data
def iteritems(self):
for i, col in enumerate(self._columns):
yield col, self._seq_at(i, name=col)
def itertuples(self, index=True, name='Pandas'):
fields = list(self._columns)
if index:
fields.insert(0, "Index")
named_tuple = collections.namedtuple(name, fields, rename=True)
for row in self._fetch():
if index:
idx = row[:len(self._index)] if self._is_mindex else row[0]
yield named_tuple(idx, *row[len(self._index):])
else:
yield named_tuple(*row[len(self._index):])
def _get_value(self, index, col, takeable=False):
if takeable:
col = utils.wrap(col, len(self._columns))
if col < 0 or col >= len(self._columns):
# TODO Monitor changes in pandas and adjust "axis 0"
#
# While technically it should be axis 1, pandas 1.2.3
# says axis 0 in the corresponding exception.
raise IndexError(f"index {col} is out of bounds for "
f"axis 0 with size {len(self._columns)}")
row_count = len(self)
index = utils.wrap(index, row_count)
if index < 0 or index >= row_count:
raise IndexError(f"index {index} is out of bounds for "
f"axis 0 with size {row_count}")
col = sa.select([self._col_at(col)])
return col.limit(1).offset(index).scalar()
raise NotImplementedError
@utils.copied
def _op(self,
op,
other,
axis="columns",
level=None,
fill_value=None,
reverse=False):
axis = 1 if axis is None else self._get_axis(axis)
def app_op(lhs, rhs):
if reverse:
lhs, rhs = rhs, lhs
result = coercion.app_op_coerced(op, lhs, rhs)
if fill_value is None:
return result
return sa.func.coalesce(result, fill_value)
if pd.api.types.is_scalar(other):
cols = [app_op(c, other) for c in self._cols()]
self._cte = sa.select(self._idx() + cols).cte()
return
if isinstance(other, (Series, pd.Series)):
other = Series.from_pandas(other, optional=True)
if axis == 1:
columns, idxers = self._join_cols(other.index)
other = list(other)
other.append(sa.sql.expression.Null()) # other[-1] => NULL
cols = [app_op(self._col_at(i), other[j]) for i, j in idxers]
self._cte = sa.select(self._idx() + cols).cte()
self._columns = columns
return
cols = [app_op(c, other._the_col) for c in self._cols()]
self._join_idx(other, cols, level=level, inplace=True)
return
if isinstance(other, (DataFrame, pd.DataFrame)):
other = DataFrame.from_pandas(other)
if self._cte == other._cte:
# Ensure different names for self join
self._cte = self._cte.alias()
columns, idxers = self._join_cols(other._columns)
cols = [
app_op(self._col_at(i), other._col_at(j)) for i, j in idxers
]
self._join_idx(other, cols, level=level, inplace=True)
self._columns = columns
return
if pd.api.types.is_list_like(other):
other = list(other)
if axis == 1:
num_cols = len(self._columns)
if len(other) != num_cols:
raise ValueError(f"Unable to coerce to Series, length "
f"must be {num_cols}: given {len(other)}")
cols = [
app_op(self._col_at(i), other[i]) for i in range(num_cols)
]
self._cte = sa.select(self._idx() + cols).cte()
return
num_rows = len(self)
if len(other) != num_rows:
raise ValueError(f"Unable to coerce to Series, length "
f"must be {num_rows}: given {len(other)}")
other = Series.from_list(other)
other_rowid = other._idx_at(0)
this, other, joined = self._paste_join(other, other_rowid)
cols = [app_op(c, other._the_col) for c in this._cols()]
query = sa.select(this._idx() + cols).select_from(joined)
self._cte = query.cte()
return
raise TypeError(f"Cannot broadcast np.ndarray with "
f"operand of type {type(other)}")
add, radd = dataframe_op(operator.add)
sub, rsub = dataframe_op(operator.sub)
mul, rmul = dataframe_op(operator.mul)
div, rdiv = dataframe_op(operator.truediv, name="div")
truediv, rtruediv = dataframe_op(operator.truediv)
floordiv, rfloordiv = dataframe_op(operator.floordiv)
mod, rmod = dataframe_op(operator.mod)
pow, rpow = dataframe_op(operator.pow)
eq = dataframe_cmp(operator.eq)
ne = dataframe_cmp(operator.ne)
le = dataframe_cmp(operator.le)
lt = dataframe_cmp(operator.lt)
ge = dataframe_cmp(operator.ge)
gt = dataframe_cmp(operator.gt)
@utils.copied
def clip(self, lower=None, upper=None, axis=None, *args, **kwargs):
if axis is None:
if not pd.api.types.is_scalar(lower):
raise ValueError("Must specify axis=0 or 1")
if not pd.api.types.is_scalar(upper):
raise ValueError("Must specify axis=0 or 1")
self._op(sa.func.greatest, lower, axis=axis, inplace=True)
self._op(sa.func.least, upper, axis=axis, inplace=True)
@utils.copied
def applymap(self, func, na_action=None):
if na_action not in (None, 'ignore'):
raise ValueError(f"na_action must be 'ignore' or None. "
f"Got {repr(na_action)}")
def app_func(col):
if na_action is None:
return func(col)
return sa.case((col.is_(None), col), else_=func(col))
self._app(app_func, inplace=True)
@utils.copied
def add_prefix(self, prefix):
columns = map(lambda c: prefix + str(c), self._columns)
self._columns = pd.Index(columns)
@utils.copied
def add_suffix(self, suffix):
columns = map(lambda c: str(c) + suffix, self._columns)
self._columns = pd.Index(columns)
def to_pandas(self):
index = []
columns = []
for row in self._fetch():
columns.append(row[len(self._index):])
idx = row[:len(self._index)] if self._is_mindex else row[0]
index.append(idx)
if self._is_mindex:
index = pd.MultiIndex.from_tuples(index, names=self._index)
else:
index = pd.Index(index, name=self._index[0])
df = pd.DataFrame.from_records(columns, columns=self._columns)
return df.set_index(index)
@staticmethod
def from_pandas(df, optional=False):
if not isinstance(df, pd.DataFrame):
if optional:
return df
raise TypeError("Must be a pandas DataFrame")
query = sa.union_all(
*[row_to_query(index, data) for index, data in df.iterrows()])
query.bind = db.metadata().bind
index = pd.Index(df.index.names)
return DataFrame(index, df.columns, query.cte())
@staticmethod
def from_table(table, schema=None, columns=None, index=None):
"""
Load table from the database as a DataFrame.
If columns is not None, it is taken as an ordered list of
columns to be included in the DataFrame.
If index is a list-like object, it is taken as an ordered
list of columns whose values are the DataFrame's index.
Otherwise, if index is not None, it is taken as the name
of the column to become the DataFrame's index.
"""
tbl = sa.Table(table,
db.metadata(),
schema=schema,
extend_existing=True,
autoload=True)
cols = [c.name for c in tbl.columns]
if index is None:
idx = [sa.func.row_number().over() - 1]
index = pd.Index((None, ))
else:
if not pd.api.types.is_list_like(index):
index = (index, )
index = pd.Index(index)
for i in index:
cols.pop(cols.index(i))
idx = [tbl.columns[i].label(None) for i in index]
if columns is None:
columns = pd.Index(cols)
else:
columns = pd.Index(columns)
for c in columns:
cols.index(c)
cols = [tbl.columns[i].label(None) for i in columns]
query = sa.select(idx + cols)
return DataFrame(index, columns, query.cte())
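        # Usage sketch (illustrative; the table, schema and column names are
        # hypothetical):
        #
        #     df = DataFrame.from_table("orders", schema="public",
        #                               columns=["amount", "status"],
        #                               index="order_id")
        #
        # With index=None a synthetic 0-based row number is used as the index,
        # per the row_number() branch above.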
class Series(base.BaseFrame, generic.GenericMixin, ops_mixin.OpsMixin):
ndim = 1
def __init__(self, index, columns, cte, name):
super().__init__(index, columns, cte)
self.name = name
def __iter__(self):
for row in self._fetch():
yield row[-1]
@property
def _the_col(self):
""" Return THE column of the Series. """
return self._col_at(0)
def iteritems(self):
for row in self._fetch():
idx = row[:-1] if self._is_mindex else row[0]
yield idx, row[-1]
def _get_value(self, label, takeable=False):
if takeable:
row_count = len(self)
label = utils.wrap(label, row_count)
            if label < 0 or label >= row_count:
raise IndexError(f"index {label} is out of bounds "
f"for axis 0 with size {row_count}")
col = sa.select([self._the_col])
return col.limit(1).offset(label).scalar()
raise NotImplementedError
@utils.copied
def _op(self,
op,
other,
level=None,
fill_value=None,
axis=0,
reverse=False,
lax=True):
if axis is not None:
# Since there is only one possible axis for Series,
# we don't need to do anything besides validation.
self._get_axis(axis)
def app_op(lhs, rhs):
if reverse:
lhs, rhs = rhs, lhs
result = coercion.app_op_coerced(op, lhs, rhs)
if fill_value is None:
return result
return sa.func.coalesce(result, fill_value)
if pd.api.types.is_scalar(other):
col = app_op(self._the_col, other)
self._cte = sa.select(self._idx() + [col]).cte()
return
if isinstance(other, (Series, pd.Series)):
other = Series.from_pandas(other, optional=True)
if self._cte == other._cte:
# Ensure different names for self join
self._cte = self._cte.alias()
col = app_op(self._the_col, other._the_col)
self._join_idx(other, [col], level=level, inplace=True)
return
if isinstance(other, (DataFrame, pd.DataFrame)):
other = DataFrame.from_pandas(other, optional=True)
return other.radd(self,
axis=axis,
level=level,
fill_value=fill_value)
if pd.api.types.is_list_like(other):
other = list(other)
if lax and len(other) == 1:
col = app_op(self._the_col, other[0])
self._cte = sa.select(self._idx() + [col]).cte()
return
row_count = len(self)
if len(other) != row_count:
if reverse:
lhs, rhs = len(other), row_count
else:
lhs, rhs = row_count, len(other)
raise ValueError(f"operands could not be broadcast together "
f"with shapes ({lhs},) ({rhs},)")
other = Series.from_list(other)
other_rowid = other._idx_at(0)
this, other, joined = self._paste_join(other, other_rowid)
col = app_op(this._the_col, other._the_col)
query = sa.select(this._idx() + [col]).select_from(joined)
self._cte = query.cte()
return
raise TypeError(f"Cannot broadcast np.ndarray with "
f"operand of type {type(other)}")
add, radd = series_op(operator.add)
sub, rsub = series_op(operator.sub)
mul, rmul = series_op(operator.mul)
div, rdiv = series_op(operator.truediv, name="div")
truediv, rtruediv = series_op(operator.truediv)
floordiv, rfloordiv = series_op(operator.floordiv)
mod, rmod = series_op(operator.mod)
pow, rpow = series_op(operator.pow)
eq = series_cmp(operator.eq)
ne = series_cmp(operator.ne)
le = series_cmp(operator.le)
lt = series_cmp(operator.lt)
ge = series_cmp(operator.ge)
gt = series_cmp(operator.gt)
@utils.copied
def clip(self, lower=None, upper=None, axis=None, *args, **kwargs):
self._op(sa.func.greatest, lower, axis=axis, inplace=True, lax=False)
self._op(sa.func.least, upper, axis=axis, inplace=True, lax=False)
@utils.copied
def add_prefix(self, prefix):
        # build a concrete list: adding a map object to a list would raise TypeError
        idx = [sa.func.concat(prefix, i) for i in self._idx()]
        self._cte = sa.select(idx + self._cols()).cte()
@utils.copied
def add_suffix(self, suffix):
        # build a concrete list: adding a map object to a list would raise TypeError
        idx = [sa.func.concat(i, suffix) for i in self._idx()]
        self._cte = sa.select(idx + self._cols()).cte()
def to_pandas(self):
index = []
value = []
for row in self._fetch():
idx = row[:-1] if self._is_mindex else row[0]
index.append(idx)
value.append(row[-1])
if self._is_mindex:
index = pd.MultiIndex.from_tuples(index, names=self._index)
else:
index =
|
pd.Index(index, name=self._index[0])
|
pandas.Index
|
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
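        # Illustrative summary (not in the original source) of this fixture:
        # three holdings of 10000/20000/10000 shares priced at 10/20/10; the
        # tests sell roughly one third of the third holding
        # (amounts_to_sell = -3333.3333 shares) and spend 20000 in cash on the
        # second one (cash_to_spend), using a qt.Cost(0.0) object whose rates
        # are adjusted inside each test.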
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
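        # Worked arithmetic behind the expected values above (added for
        # clarity, not part of the original test): selling 3333.3333 shares at
        # price 10 grosses 33333.333; with sell_rate = 0.001 the fee is
        # 33.3333 and the proceeds are 33299.999667.  Buying with 20000 cash
        # at price 20 and buy_rate = 0.003 yields 20000 / (20 * 1.003)
        # = 997.00897 shares with a fee of 997.00897 * 20 * 0.003 = 59.8205.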
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
                      f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 0:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 100:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
        print('\nselling result with fixed rate = 0.001 and slipage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
                      f'{test_fixed_fee_result[0]} does not equal to [0, 0, -3333.3333]')
        self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
                               msg=f'{test_fixed_fee_result[1]} does not equal to 33298.88855591')
        self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
                               msg=f'{test_fixed_fee_result[2]} does not equal to 34.44444409')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
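        # Summary of the par_types inference exercised above (added for
        # clarity, not part of the original test): unrecognized type strings
        # fall back to 'enum'; a 2-tuple of ints is inferred as 'discr', a
        # 2-tuple whose first element is a float as 'conti', and tuples with
        # three or more elements as 'enum'.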
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
        # All points have been extracted; build ten subspaces around 10 of them.
        # Check that each subspace is a Space and lies within s, extract a point
        # set with interval 32, and check that the number of generated points is correct.
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
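# Hedged note (inferred from the assertions above, not from CashPlan's source):
# right-multiplying a plan by an int or float appears to scale the investment
# amounts, while left-multiplying by an int (n * plan) repeats the whole plan n
# times along the time axis. A tiny illustrative sketch of the scaling part:
def _scale_plan_amounts(amounts, factor):
    """Scale every planned investment amount by a numeric factor."""
    return [amount * factor for amount in amounts]
# e.g. _scale_plan_amounts([20000, 10000], 2) -> [40000, 20000], as asserted for cp_mul_int.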
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
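# Illustrative sketch only (the real ResultPool may differ): the cut() behaviour
# asserted above can be reproduced by keeping the `capacity` best (or worst)
# performers and returning them sorted by performance in ascending order.
def _cut_pool(items, perfs, capacity, keep_largest=True):
    """Keep `capacity` entries with the largest (or smallest) perf values."""
    pairs = sorted(zip(perfs, items), key=lambda pair: pair[0])
    kept = pairs[-capacity:] if keep_largest else pairs[:capacity]
    return [item for _, item in kept], [perf for perf, _ in kept]
# e.g. _cut_pool(['abc', 'first'], [12, 1], 1) -> (['abc'], [12])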
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
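# Hedged note (inferred from the assertions above): space_around_centre() appears to
# build, on each axis, the interval [centre - radius, centre + radius] clipped to the
# space bounds; for enum axes only the enum values falling inside that interval are
# kept, unless ignore_enums is left True, in which case the enum axis is unchanged.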
def test_get_stock_pool(self):
print('start testing the stock pool building function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock industries are in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock markets are "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock markets are "主板", and list dates are on or before "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if list dates are on or before "1998-01-01", and industry and area are in the given lists\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# the manually calculated expected results below are stored in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# build a test dataset of 500 data points to test the evaluation process when there are more than 250 data points
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
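# Hedged note (not from the source): eval_fv appears to return the final entry of the
# 'value' column, i.e. the terminal net value of the series, and -np.inf for an empty
# DataFrame, which is exactly what the assertions above check.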
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test max drawdown == 0:
# TODO: investigate how a division by zero (peak value of 0) changes the result
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
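# Hedged note (not from the source): eval_max_drawdown appears to return a 4-tuple
# (mdd, peak_idx, trough_idx, recovery_idx). A common definition of the drawdown at
# time t is (peak_t - value_t) / peak_t with peak_t = max(value[:t + 1]); mdd is its
# maximum over the series, and recovery_idx is NaN when the series never climbs back
# to the previous peak, which matches the np.isnan assertions above.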
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
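# Hedged note (not from the source): a common definition of the information ratio is
#     IR = mean(r - r_ref) / std(r - r_ref)
# where r and r_ref are the periodic returns of the portfolio and the reference
# series; the expected values above were pre-computed externally (see the Excel note
# in setUp).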
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# test volatility calculation on the long data series
expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514,
0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286,
0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487,
0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
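# Hedged note (not from the source): the rolling expected_volatility above is NaN for
# the first 249 points, which suggests a 250-period rolling window; a common
# definition is the annualized standard deviation of (log) returns,
#     vol = std(returns) * sqrt(250),
# and for long series eval_volatility() is compared against the nan-mean of the
# rolling values.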
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# test Sharpe ratio calculation on the long data series
expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238,
-0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977,
0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706,
0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
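# Hedged note (not from the source): a common definition of the Sharpe ratio is
#     sharpe = (annualized_return - riskfree_rate) / annualized_volatility,
# and the long-data assertions above compare eval_sharp() against the nan-mean of a
# rolling Sharpe series pre-computed externally.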
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# test beta calculation on the long data series
expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
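# Hedged note (not from the source): a common definition of beta is
#     beta = cov(r, r_ref) / var(r_ref)
# over the periodic returns of the portfolio and the reference; for the long series
# eval_beta() is compared against the nan-mean of a rolling beta pre-computed
# externally.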
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
# test alpha calculation on the long data series
expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215,
-0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115,
0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003,
0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
    def test_benchmark(self):
        """test evaluate function eval_benchmark() against a common reference"""
        reference = self.test_data1
        for test_data in (self.test_data2, self.test_data3, self.test_data4,
                          self.test_data5, self.test_data6, self.test_data7):
            tr, yr = eval_benchmark(test_data, reference, 'value')
            self.assertAlmostEqual(tr, 0.19509091)
            self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
        """test evaluate function evaluate()"""
        pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
        # Carefully designed simulated share names, trading dates and share prices
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
        # Carefully designed simulated PT (position target) signals (see the note after this matrix):
self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]])
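        # A reading of the PT matrix above (illustrative note, not part of the original fixture):
        # each row is one trading day and each column one share, holding the *target* position
        # as a fraction of total assets. For instance, the first row targets 25% of assets in
        # share5; with the initial 10,000 of cash and a price of 4.50 that implies
        # 10000 * 0.250 / 4.50, roughly 555.56 shares, and 7,500 of remaining cash, which matches
        # the first row of the expected result matrix pt_res_sb00 further below.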
        # Carefully designed simulated PS (proportion) trading signals, closely mirroring the PT
        # signals above (see the note after this matrix)
self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000],
[0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000],
[0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000],
[-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]])
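        # Illustrative note: unlike the PT matrix, which restates the target allocation on every
        # day, the PS matrix is sparse and only carries a value on days when a trade happens;
        # positive entries appear to buy a proportion of assets, while negative entries
        # (e.g. -1.000) appear to sell that proportion of the current holding.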
        # Carefully designed simulated VS (volume) trading signals, similar to the PS signals
        # above (see the note after this matrix)
self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 300, 300],
[400, 400, 000, 000, 000, 000, 000],
[000, 000, 250, 000, 000, 000, 000],
[000, 000, 000, 000, -400, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, -200, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, -300],
[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 300, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 400, 000, -300, 600, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[600, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, -400, 600],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 500, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 300, 000, 000],
[-500, 000, 000, 500, 000, 200, 000],
[000, 000, 000, 000, 000, 000, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, -700, 000, -600, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-400, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, -600, 000, 300],
[000, 000, 000, 000, 000, 000, 000],
[000, -300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 700, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000]])
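        # Illustrative note: the VS values appear to be absolute share counts rather than
        # proportions, e.g. 500 means "trade 500 shares" and negative numbers are sells.
        # A minimal sanity check of the hand-built fixtures, assuming one row per trading
        # date and one column per share (added illustration, not part of the original test):
        for signal_matrix in (self.pt_signals, self.ps_signals, self.vs_signals):
            assert signal_matrix.shape == (len(self.dates), len(self.shares))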
        # Carefully designed simulated multi-price trading signals: 50 trading days of operations on three shares
self.multi_shares = ['000010', '000030', '000039']
self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08']
self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates]
        # The trading prices operated on include the open, high and close prices (see the note after the three price matrices)
self.multi_prices_open = np.array([[10.02, 9.88, 7.26],
[10.00, 9.88, 7.00],
[9.98, 9.89, 6.88],
[9.97, 9.75, 6.91],
[9.99, 9.74, np.nan],
[10.01, 9.80, 6.81],
[10.04, 9.62, 6.63],
[10.06, 9.65, 6.45],
[10.06, 9.58, 6.16],
[10.11, 9.67, 6.24],
[10.11, 9.81, 5.96],
[10.07, 9.80, 5.97],
[10.06, 10.00, 5.96],
[10.09, 9.95, 6.20],
[10.03, 10.10, 6.35],
[10.02, 10.06, 6.11],
[10.06, 10.14, 6.37],
[10.08, 9.90, 5.58],
[9.99, 10.20, 5.65],
[10.00, 10.29, 5.65],
[10.03, 9.86, 5.19],
[10.02, 9.48, 5.42],
[10.06, 10.01, 6.30],
[10.03, 10.24, 6.15],
[9.97, 10.26, 6.05],
[9.94, 10.24, 5.89],
[9.83, 10.12, 5.22],
[9.78, 10.65, 5.20],
[9.77, 10.64, 5.07],
[9.91, 10.56, 6.04],
[9.92, 10.42, 6.12],
[9.97, 10.43, 5.85],
[9.91, 10.29, 5.67],
[9.90, 10.30, 6.02],
[9.88, 10.44, 6.04],
[9.91, 10.60, 7.07],
[9.63, 10.67, 7.64],
[9.64, 10.46, 7.99],
[9.57, 10.39, 7.59],
[9.55, 10.90, 8.73],
[9.58, 11.01, 8.72],
[9.61, 11.01, 8.97],
[9.62, np.nan, 8.58],
[9.55, np.nan, 8.71],
[9.57, 10.82, 8.77],
[9.61, 11.02, 8.40],
[9.63, 10.96, 7.95],
[9.64, 11.55, 7.76],
[9.61, 11.74, 8.25],
[9.56, 11.80, 7.51]])
self.multi_prices_high = np.array([[10.07, 9.91, 7.41],
[10.00, 10.04, 7.31],
[10.00, 9.93, 7.14],
[10.00, 10.04, 7.00],
[10.03, 9.84, np.nan],
[10.03, 9.88, 6.82],
[10.04, 9.99, 6.96],
[10.09, 9.70, 6.85],
[10.10, 9.67, 6.50],
[10.14, 9.71, 6.34],
[10.11, 9.85, 6.04],
[10.10, 9.90, 6.02],
[10.09, 10.00, 6.12],
[10.09, 10.20, 6.38],
[10.10, 10.11, 6.43],
[10.05, 10.18, 6.46],
[10.07, 10.21, 6.43],
[10.09, 10.26, 6.27],
[10.10, 10.38, 5.77],
[10.00, 10.47, 6.01],
[10.04, 10.42, 5.67],
[10.04, 10.07, 5.67],
[10.06, 10.24, 6.35],
[10.09, 10.27, 6.32],
[10.05, 10.38, 6.43],
[9.97, 10.43, 6.36],
[9.96, 10.39, 5.79],
[9.86, 10.65, 5.47],
[9.77, 10.84, 5.65],
[9.92, 10.65, 6.04],
[9.94, 10.73, 6.14],
[9.97, 10.63, 6.23],
[9.97, 10.51, 5.83],
[9.92, 10.35, 6.25],
[9.92, 10.46, 6.27],
[9.92, 10.63, 7.12],
[9.93, 10.74, 7.82],
[9.64, 10.76, 8.14],
[9.58, 10.54, 8.27],
[9.60, 11.02, 8.92],
[9.58, 11.12, 8.76],
[9.62, 11.17, 9.15],
[9.62, np.nan, 8.90],
[9.64, np.nan, 9.01],
[9.59, 10.92, 9.16],
[9.62, 11.15, 9.00],
[9.63, 11.11, 8.27],
[9.70, 11.55, 7.99],
[9.66, 11.95, 8.33],
[9.64, 11.93, 8.25]])
self.multi_prices_close = np.array([[10.04, 9.68, 6.64],
[10.00, 9.87, 7.26],
[10.00, 9.86, 7.03],
[9.99, 9.87, 6.87],
[9.97, 9.79, np.nan],
[9.99, 9.82, 6.64],
[10.03, 9.80, 6.85],
[10.03, 9.66, 6.70],
[10.06, 9.62, 6.39],
[10.06, 9.58, 6.22],
[10.11, 9.69, 5.92],
[10.09, 9.78, 5.91],
[10.07, 9.75, 6.11],
[10.06, 9.96, 5.91],
[10.09, 9.90, 6.23],
[10.03, 10.04, 6.28],
[10.03, 10.06, 6.28],
[10.06, 10.08, 6.27],
[10.08, 10.24, 5.70],
[10.00, 10.24, 5.56],
[9.99, 10.24, 5.67],
[10.03, 9.86, 5.16],
[10.03, 10.13, 5.69],
[10.06, 10.12, 6.32],
[10.03, 10.10, 6.14],
[9.97, 10.25, 6.25],
[9.94, 10.24, 5.79],
[9.83, 10.22, 5.26],
[9.77, 10.75, 5.05],
[9.84, 10.64, 5.45],
[9.91, 10.56, 6.06],
[9.93, 10.60, 6.21],
[9.96, 10.42, 5.69],
[9.91, 10.25, 5.46],
[9.91, 10.24, 6.02],
[9.88, 10.49, 6.69],
[9.91, 10.57, 7.43],
[9.64, 10.63, 7.72],
[9.56, 10.48, 8.16],
[9.57, 10.37, 7.83],
[9.55, 10.96, 8.70],
[9.57, 11.02, 8.71],
[9.61, np.nan, 8.88],
[9.61, np.nan, 8.54],
[9.55, 10.88, 8.87],
[9.57, 10.87, 8.87],
[9.63, 11.01, 8.18],
[9.64, 11.01, 7.80],
[9.65, 11.58, 7.97],
[9.62, 11.80, 8.25]])
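        # Illustrative note: the np.nan entries in the three price matrices presumably stand in
        # for days without a quote (e.g. a trading suspension), which the backtest loop has to
        # tolerate.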
        # The trading signals come in three groups, applied to the open, high and close prices respectively.
        # The key point here is the handling of the stock delivery (settlement) period: when it is non-zero,
        # settlement takes place in units of trading days (see the note after these three signal frames).
self.multi_signals = []
        # The first group of multi_signals applies to the open price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.000, 0.000],
[0.000, -0.500, 0.000],
[0.000, -0.500, 0.000],
[0.000, 0.000, 0.000],
[0.150, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.300, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.300],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.350, 0.250],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.100, 0.000, 0.350],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.050, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # The second group applies to the high price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.150, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, -0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.200],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # The third group applies to the close price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -0.800],
[0.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.750, 0.000, 0.000],
[0.000, 0.000, -0.850],
[0.000, 0.000, 0.000],
[0.000, -0.700, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -1.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-1.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
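        # Illustrative note on the delivery period exercised by these fixtures: with a stock
        # delivery period of n trading days, shares bought on day T presumably only become
        # available for selling n trading days later, and a cash delivery period likewise delays
        # the reuse of sale proceeds. The suffixes of the expected-result matrices further below
        # (e.g. pt_res_sb00 vs pt_res_sb20) appear to encode the buy/sell ordering and these
        # delivery periods.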
        # The backtest also needs three groups of prices: open, high and close
self.multi_histories = []
        # The first history frame holds the open prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_open,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # The second history frame holds the high prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_high,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # The third history frame holds the close prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_close,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # Set up the backtest parameters
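        # The cash plan injects 10,000 on each of the three listed dates (30,000 in total),
        # which is why the total-asset column of the expected results below jumps by roughly
        # 10,000 around 2016/08/12 and again around 2016/09/23.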
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
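        # A frictionless baseline cost model: all fixed fees, rates and minimum charges are zero.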
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
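        # The same cost model but with minimum charges (10 per buy, 5 per sell), presumably for
        # the test cases that exercise minimum-fee handling.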
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
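        # Wrap the hand-built signal and price matrices into the history-panel structures that
        # the loop functions under test consume: the single-price fixtures carry only the 'close'
        # price type, while the multi-price fixtures stack 'open', 'high' and 'close'.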
self.pt_signal_hp = dataframe_to_hp(
pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.ps_signal_hp = dataframe_to_hp(
pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.vs_signal_hp = dataframe_to_hp(
pd.DataFrame(self.vs_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_signal_hp = stack_dataframes(
self.multi_signals,
stack_along='htypes',
htypes='open, high, close'
)
self.history_list = dataframe_to_hp(
pd.DataFrame(self.prices, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_history_list = stack_dataframes(
self.multi_histories,
stack_along='htypes',
htypes='open, high, close'
)
        # Expected backtest results for the simulated PT signals
        # PT signals, sell before buy, delivery period 0
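        # Column layout of the expected-result matrices, as inferred from the fixture values:
        # seven share holdings in the order of self.shares, followed by the cash balance, what
        # appears to be the accumulated trading fee (all zeros here given the zero-cost rate),
        # and finally the total asset value.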
self.pt_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500],
[2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]])
        # PT signals, buy before sell, delivery period 0
self.pt_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21390.2918],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611],
[2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742],
[0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917],
[0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692],
[644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080],
[644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37719.9840],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]])
        # PT signals, sell before buy, delivery period of 2 days for stocks and 0 days for cash,
        # so that the cash from a sale can immediately be reused for further purchases
self.pt_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22704.826],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545],
[2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888],
[578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261],
[0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 40145.531],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]])
# PT signal, buy first then sell, settlement period: 2 days (stock) / 1 day (cash)
self.pt_res_bs21 = np.array([
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
# Simulated backtest results for PS signals
# PS signal, sell first then buy, settlement period 0
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346],
[1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000,
33323.8359],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35708.2608],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]])
# PS signal, buy first then sell, settlement period 0
self.ps_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21969.5329],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913],
[1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38574.0544],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]])
# PS signal, sell first then buy, settlement period: 2 days (stock) / 1 day (cash)
self.ps_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]])
# PS signal, buy first then sell, settlement period: 2 days (stock) / 1 day (cash)
self.ps_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111],
[351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961],
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495],
[117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21775.215],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297],
[1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 36266.530],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]])
# Simulated backtest results for VS signals
# VS signal, sell first then buy, settlement period 0
self.vs_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33870.4703],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34014.3010],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34680.5671],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33890.9945],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34004.6640],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34127.7768],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33504.6236],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33652.1318],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35557.5191],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35669.7128],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35211.4466],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]])
        # VS signals, buy first then sell, settlement period is 0
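        # Each expected-result row below: columns 0-6 are the holdings of the 7 assets
        # and column 7 is cash (these are the slices used by the tests further down);
        # the last two columns appear to be cumulative fee and total portfolio value.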
self.vs_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]])
        # VS signals, sell first then buy, settlement period: 2 days (stocks) / 1 day (cash)
self.vs_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
        # VS signals, buy first then sell, settlement period: 2 days (stocks) / 1 day (cash)
self.vs_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
        # Multi-signal processing results, sell first then buy, buying with the cash freed by sales,
        # settlement period: 2 days (stocks) / 0 days (cash)
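        # multi_res rows have 6 columns; by analogy with the 10-column fixtures above
        # they appear to hold 3 asset amounts, then cash, fee and total value.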
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])

    def test_loop_step_pt_sb00(self):
        """ test loop step with PT signals, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[2][7],
own_amounts=self.pt_res_sb00[2][0:7],
available_cash=self.pt_res_sb00[2][7],
available_amounts=self.pt_res_sb00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[2][7] + c_g + c_s
amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[30][7],
own_amounts=self.pt_res_sb00[30][0:7],
available_cash=self.pt_res_sb00[30][7],
available_amounts=self.pt_res_sb00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[30][7] + c_g + c_s
amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[59][7] + 10000,
own_amounts=self.pt_res_sb00[59][0:7],
available_cash=self.pt_res_sb00[59][7] + 10000,
available_amounts=self.pt_res_sb00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[95][7],
own_amounts=self.pt_res_sb00[95][0:7],
available_cash=self.pt_res_sb00[95][7],
available_amounts=self.pt_res_sb00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[96][7] + c_g + c_s
amounts = self.pt_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))

    def test_loop_step_pt_bs00(self):
        """ test loop step with PT signals, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[2][7],
own_amounts=self.pt_res_bs00[2][0:7],
available_cash=self.pt_res_bs00[2][7],
available_amounts=self.pt_res_bs00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[2][7] + c_g + c_s
amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[30][7],
own_amounts=self.pt_res_bs00[30][0:7],
available_cash=self.pt_res_bs00[30][7],
available_amounts=self.pt_res_bs00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[30][7] + c_g + c_s
amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[59][7] + 10000,
own_amounts=self.pt_res_bs00[59][0:7],
available_cash=self.pt_res_bs00[59][7] + 10000,
available_amounts=self.pt_res_bs00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[95][7],
own_amounts=self.pt_res_bs00[95][0:7],
available_cash=self.pt_res_bs00[95][7],
available_amounts=self.pt_res_bs00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[96][7] + c_g + c_s
amounts = self.pt_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))

    def test_loop_step_ps_sb00(self):
        """ test loop step with PS signals, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[2][7],
own_amounts=self.ps_res_sb00[2][0:7],
available_cash=self.ps_res_sb00[2][7],
available_amounts=self.ps_res_sb00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[2][7] + c_g + c_s
amounts = self.ps_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[30][7],
own_amounts=self.ps_res_sb00[30][0:7],
available_cash=self.ps_res_sb00[30][7],
available_amounts=self.ps_res_sb00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[30][7] + c_g + c_s
amounts = self.ps_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[59][7] + 10000,
own_amounts=self.ps_res_sb00[59][0:7],
available_cash=self.ps_res_sb00[59][7] + 10000,
available_amounts=self.ps_res_sb00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[95][7],
own_amounts=self.ps_res_sb00[95][0:7],
available_cash=self.ps_res_sb00[95][7],
available_amounts=self.ps_res_sb00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[96][7] + c_g + c_s
amounts = self.ps_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[97][0:7]))

    def test_loop_step_ps_bs00(self):
        """ test loop step with PS signals, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[2][7],
                                                     own_amounts=self.ps_res_bs00[2][0:7],
available_cash=self.ps_res_bs00[2][7],
available_amounts=self.ps_res_bs00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[2][7] + c_g + c_s
amounts = self.ps_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[30][7],
                                                     own_amounts=self.ps_res_bs00[30][0:7],
available_cash=self.ps_res_bs00[30][7],
available_amounts=self.ps_res_bs00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[30][7] + c_g + c_s
amounts = self.ps_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[59][7] + 10000,
own_amounts=self.ps_res_bs00[59][0:7],
available_cash=self.ps_res_bs00[59][7] + 10000,
available_amounts=self.ps_res_bs00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[95][7],
own_amounts=self.ps_res_bs00[95][0:7],
available_cash=self.ps_res_bs00[95][7],
available_amounts=self.ps_res_bs00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[96][7] + c_g + c_s
amounts = self.ps_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[97][0:7]))
def test_loop_step_vs_sb00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[2][7],
own_amounts=self.vs_res_sb00[2][0:7],
available_cash=self.vs_res_sb00[2][7],
available_amounts=self.vs_res_sb00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[2][7] + c_g + c_s
amounts = self.vs_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[30][7],
own_amounts=self.vs_res_sb00[30][0:7],
available_cash=self.vs_res_sb00[30][7],
available_amounts=self.vs_res_sb00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[30][7] + c_g + c_s
amounts = self.vs_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[59][7] + 10000,
own_amounts=self.vs_res_sb00[59][0:7],
available_cash=self.vs_res_sb00[59][7] + 10000,
available_amounts=self.vs_res_sb00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[95][7],
own_amounts=self.vs_res_sb00[95][0:7],
available_cash=self.vs_res_sb00[95][7],
available_amounts=self.vs_res_sb00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[95][7] + c_g + c_s
amounts = self.vs_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[97][0:7]))
def test_loop_step_vs_bs00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[2][7],
own_amounts=self.vs_res_bs00[2][0:7],
available_cash=self.vs_res_bs00[2][7],
available_amounts=self.vs_res_bs00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[2][7] + c_g + c_s
amounts = self.vs_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[30][7],
own_amounts=self.vs_res_bs00[30][0:7],
available_cash=self.vs_res_bs00[30][7],
available_amounts=self.vs_res_bs00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[30][7] + c_g + c_s
amounts = self.vs_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[59][7] + 10000,
own_amounts=self.vs_res_bs00[59][0:7],
available_cash=self.vs_res_bs00[59][7] + 10000,
available_amounts=self.vs_res_bs00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[95][7],
own_amounts=self.vs_res_bs00[95][0:7],
available_cash=self.vs_res_bs00[95][7],
available_amounts=self.vs_res_bs00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[95][7] + c_g + c_s
amounts = self.vs_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[97][0:7]))
def test_loop_pt(self):
""" Test looping of PT proportion target signals, with
stock delivery delay = 0 days
cash delivery delay = 0 day
buy-sell sequence = sell first
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 0 days \n'
'cash delivery delay = 0 day \n'
'buy-sell sequence = sell first')
res = apply_loop(op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.pt_res_bs00, 2))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=0,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.ps_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PS proportion signal type signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PS proportion signal type signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs(self):
""" Test looping of VS Volume Signal type of signals
"""
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.vs_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of VS volume signal type signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of VS volume signal type signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_multiple_signal(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=True,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.multi_res[i]))
print()
self.assertTrue(np.allclose(res, self.multi_res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=False,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestStrategy(unittest.TestCase):
""" test all properties and methods of strategy base class"""
def setUp(self) -> None:
pass
class TestLSStrategy(RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
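# par_types 'discr, conti' declares N as a discrete (integer) parameter bounded by [1, 5]
# and Price as a continuous parameter bounded by [2, 10]; window_length=5 means each call
# to _realize() works on a 5-day window of the four data types listed in data_types.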
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
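# Sketch of the selection logic: average the prices of each share over the window and
# price types, take the last available day-over-day change of the close, divide it by the
# average price to get a change rate, then give the two largest of the three shares a
# weight of 0.5 each (argsort()[1:] drops the index of the smallest value).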
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
# TODO: This strategy is not working, find out why and improve
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
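# ratio < r marks a doji-like candle; the nested np.where then encodes the rules from the
# docstring: +1 (buy) when the close-to-close change exceeds price1, -1 (sell) when it is
# below price2, and 0 (hold) otherwise.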
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class MyStg(qt.RollingTiming):
"""自定义双均线择时策略策略"""
def __init__(self):
"""这个均线择时策略只有三个参数:
- SMA 慢速均线,所选择的股票
- FMA 快速均线
- M 边界值
策略的其他说明
"""
"""
必须初始化的关键策略参数清单:
"""
super().__init__(
pars=(20, 100, 0.01),
par_count=3,
par_types=['discr', 'discr', 'conti'],
par_bounds_or_enums=[(10, 250), (10, 250), (0.0, 0.5)],
stg_name='CUSTOM ROLLING TIMING STRATEGY',
stg_text='Customized Rolling Timing Strategy for Testing',
data_types='close',
window_length=100,
)
print(f'=====================\n====================\n'
f'custom strategy initialized, \npars: {self.pars}\npar_count:{self.par_count}\npar_types:'
f'{self.par_types}\n'
f'{self.info()}')
# The concrete implementation of the strategy goes into the strategy's _realize() method
# This method always takes two arguments: hist_price, the historical data of the given portfolio, and params, the concrete strategy parameters
def _realize(self, hist_price, params):
"""策略的具体实现代码:
s:短均线计算日期;l:长均线计算日期;m:均线边界宽度;hesitate:均线跨越类型"""
f, s, m = params
# Temporary workaround: slice the incoming data inside the strategy implementation; eventually the data should be sliced outside this layer so that what is passed in already matches the data_types parameter
h = hist_price.T
# compute the current values of the long and short moving averages
s_ma = qt.sma(h[0], s)[-1]
f_ma = qt.sma(h[0], f)[-1]
# compute the stop band around the slow moving average; when the fast MA is within this band, close the position and emit no buy/sell signal
s_ma_u = s_ma * (1 + m)
s_ma_l = s_ma * (1 - m)
# produce long/short/empty marks at different levels according to the wait-and-see mode
if f_ma > s_ma_u:  # when the fast MA is above the slow MA's stop band, hold a long position
return 1
elif s_ma_l < f_ma < s_ma_u:  # when the fast MA is within the stop band, close the position
return 0
else:  # f_ma < s_ma_l: when the fast MA is below the slow MA's stop band, hold a short position
return -1
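# A minimal usage sketch for the custom strategy above (hypothetical, not executed by the
# test suite). Assuming qt.Operator accepts strategy instances in addition to the built-in
# strategy names used elsewhere in these tests, it could be wired up roughly like this:
#
#     stg = MyStg()
#     op = qt.Operator(strategies=[stg])
#     op.info()
#
# The TestOperator class below exercises the Operator API with built-in strategies only.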
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing HistoryPanel object\n')
# build up test data: a 3-share, 50-day, 4-price-type matrix of prices that contains NaN values
# on some days for some of the shares in the pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
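# The EPS matrix above is deliberately sparse (mostly NaN), mimicking financial-report data
# that is only published on some days; it is transposed into test_data_sel_finance below
# for the sel_finance test.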
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
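# Data layout: test_data_3D has shape (3 shares, 50 days, 4 price types) and is later
# wrapped into a HistoryPanel with levels=shares, columns=price types and rows=dates;
# test_data_sel_finance holds the single 'eps' type per share and day.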
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(strategies='dma', signal_type='PS')
self.op2 = qt.Operator(strategies='dma, macd, trix')
def test_init(self):
""" test initialization of Operator class"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.signal_type, 'pt')
self.assertIsInstance(op.strategies, list)
self.assertEqual(len(op.strategies), 0)
op = qt.Operator('dma')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies, list)
self.assertIsInstance(op.strategies[0], TimingDMA)
op = qt.Operator('dma, macd')
self.assertIsInstance(op, qt.Operator)
op = qt.Operator(['dma', 'macd'])
self.assertIsInstance(op, qt.Operator)
def test_repr(self):
""" test basic representation of Opeartor class"""
op = qt.Operator()
self.assertEqual(op.__repr__(), 'Operator()')
op = qt.Operator('macd, dma, trix, random, avg_low')
self.assertEqual(op.__repr__(), 'Operator(macd, dma, trix, random, avg_low)')
self.assertEqual(op['dma'].__repr__(), 'Q-TIMING(DMA)')
self.assertEqual(op['macd'].__repr__(), 'R-TIMING(MACD)')
self.assertEqual(op['trix'].__repr__(), 'R-TIMING(TRIX)')
self.assertEqual(op['random'].__repr__(), 'SELECT(RANDOM)')
self.assertEqual(op['avg_low'].__repr__(), 'FACTOR(AVG LOW)')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
self.op.info()
def test_get_strategy_by_id(self):
""" test get_strategy_by_id()"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op.get_strategy_by_id('macd'), op.strategies[0])
self.assertIs(op.get_strategy_by_id(1), op.strategies[1])
self.assertIs(op.get_strategy_by_id('trix'), op.strategies[2])
def test_get_items(self):
""" test method __getitem__(), it should be the same as geting strategies by id"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op['macd'], op.strategies[0])
self.assertIs(op['trix'], op.strategies[2])
self.assertIs(op[1], op.strategies[1])
self.assertIs(op[3], op.strategies[2])
def test_get_strategies_by_price_type(self):
""" test get_strategies_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategies_by_price_type('close')
stg_open = op.get_strategies_by_price_type('open')
stg_high = op.get_strategies_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, [op.strategies[1]])
self.assertEqual(stg_open, [op.strategies[0], op.strategies[2]])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategies_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_count_by_price_type(self):
""" test get_strategy_count_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_count_by_price_type('close')
stg_open = op.get_strategy_count_by_price_type('open')
stg_high = op.get_strategy_count_by_price_type('high')
self.assertIsInstance(stg_close, int)
self.assertIsInstance(stg_open, int)
self.assertIsInstance(stg_high, int)
self.assertEqual(stg_close, 1)
self.assertEqual(stg_open, 2)
self.assertEqual(stg_high, 0)
stg_wrong = op.get_strategy_count_by_price_type(123)
self.assertIsInstance(stg_wrong, int)
self.assertEqual(stg_wrong, 0)
def test_get_strategy_names_by_price_type(self):
""" test get_strategy_names_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_names_by_price_type('close')
stg_open = op.get_strategy_names_by_price_type('open')
stg_high = op.get_strategy_names_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['DMA'])
self.assertEqual(stg_open, ['MACD', 'TRIX'])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategy_names_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_id_by_price_type(self):
""" test get_strategy_IDs_by_price_type"""
print('-----Test get strategy IDs by price type------\n')
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['dma'])
self.assertEqual(stg_open, ['macd', 'trix'])
self.assertEqual(stg_high, [])
op.add_strategies('dma, macd')
op.set_parameter('dma_1', price_type='open')
op.set_parameter('macd', price_type='open')
op.set_parameter('macd_1', price_type='high')
op.set_parameter('trix', price_type='close')
print(f'Operator strategy id:\n'
f'{op.strategies} on memory pos:\n'
f'{[id(stg) for stg in op.strategies]}')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
stg_all = op.get_strategy_id_by_price_type()
print(f'All IDs of strategies:\n'
f'{stg_all}\n'
f'All price types of strategies:\n'
f'{[stg.price_type for stg in op.strategies]}')
self.assertEqual(stg_close, ['dma', 'trix'])
self.assertEqual(stg_open, ['macd', 'dma_1'])
self.assertEqual(stg_high, ['macd_1'])
stg_wrong = op.get_strategy_id_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_property_strategies(self):
""" test property strategies"""
print(f'created a new simple Operator with only one strategy: DMA')
op = qt.Operator('dma')
strategies = op.strategies
self.assertIsInstance(strategies, list)
op.info()
print(f'created the second simple Operator with three strategies')
self.assertIsInstance(strategies[0], TimingDMA)
op = qt.Operator('dma, macd, cdl')
strategies = op.strategies
op.info()
self.assertIsInstance(strategies, list)
self.assertIsInstance(strategies[0], TimingDMA)
self.assertIsInstance(strategies[1], TimingMACD)
self.assertIsInstance(strategies[2], TimingCDL)
def test_property_strategy_count(self):
""" test Property strategy_count, and the method get_strategy_count_by_price_type()"""
self.assertEqual(self.op.strategy_count, 1)
self.assertEqual(self.op2.strategy_count, 3)
self.assertEqual(self.op.get_strategy_count_by_price_type(), 1)
self.assertEqual(self.op2.get_strategy_count_by_price_type(), 3)
self.assertEqual(self.op.get_strategy_count_by_price_type('close'), 1)
self.assertEqual(self.op.get_strategy_count_by_price_type('high'), 0)
self.assertEqual(self.op2.get_strategy_count_by_price_type('close'), 3)
self.assertEqual(self.op2.get_strategy_count_by_price_type('open'), 0)
def test_property_strategy_names(self):
""" test property strategy_ids"""
op = qt.Operator('dma')
self.assertIsInstance(op.strategy_ids, list)
names = op.strategy_ids[0]
print(f'names are {names}')
self.assertEqual(names, 'dma')
op = qt.Operator('dma, macd, trix, cdl')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'cdl')
op = qt.Operator('dma, macd, trix, dma, dma')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'dma_1')
self.assertEqual(op.strategy_ids[4], 'dma_2')
def test_property_strategy_blenders(self):
""" test property strategy blenders including property setter,
and test the method get_blender()"""
print(f'------- Test property strategy blenders ---------')
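# As the assertions below show, blender strings such as '1+2' are parsed and stored as
# postfix (reverse Polish) token lists, e.g. '1+2+3' becomes ['+', '3', '+', '2', '1'].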
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
# test adding blender to empty operator
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op.add_strategy('dma')
op.strategy_blenders = '1+2'
self.assertEqual(op.strategy_blenders, {'close': ['+', '2', '1']})
op.clear_strategies()
self.assertEqual(op.strategy_blenders, {})
op.add_strategies('dma, trix, macd, dma')
op.set_parameter('dma', price_type='open')
op.set_parameter('trix', price_type='high')
op.set_blender('open', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
op.set_blender('open', '1+2+3')
op.set_blender('abc', '1+2+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
blender_abc = op.get_blender('abc')
self.assertEqual(op.strategy_blenders, {'open': ['+', '3', '+', '2', '1']})
self.assertEqual(blender_open, ['+', '3', '+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
self.assertEqual(blender_abc, None)
op.set_blender('open', 123)
blender_open = op.get_blender('open')
self.assertEqual(blender_open, [])
op.set_blender(None, '1+1')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
self.assertEqual(op.get_blender(), {'close': ['+', '1', '1'],
'open': ['+', '1', '1'],
'high': ['+', '1', '1']})
self.assertEqual(blender_open, ['+', '1', '1'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '1', '1'])
op.set_blender(None, ['1+1', '3+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '3'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '4', '3'])
self.assertEqual(op.view_blender('open'), '3+4')
self.assertEqual(op.view_blender('close'), '1+1')
self.assertEqual(op.view_blender('high'), '3+4')
op.strategy_blenders = (['1+2', '2*3', '1+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
self.assertEqual(op.view_blender('open'), '1+4')
self.assertEqual(op.view_blender('close'), '1+2')
self.assertEqual(op.view_blender('high'), '2*3')
# test error inputs:
# wrong type of price_type
self.assertRaises(TypeError, op.set_blender, 1, '1+3')
# price_type not found, no change is made
op.set_blender('volume', '1+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# price_type not valid, no change is made
op.set_blender('closee', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('open', 55)
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('close', ['1+2'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, ['*', '3', '2'])
# can't parse blender, set to empty list
op.set_blender('high', 'a+bc')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, [])
def test_property_signal_type(self):
""" test property signal_type"""
op = qt.Operator()
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'pt')
op = qt.Operator(signal_type='ps')
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='PS')
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='proportion signal')
self.assertEqual(op.signal_type, 'ps')
print(f'"pt" will be the default type if wrong value is given')
op = qt.Operator(signal_type='wrong value')
self.assertEqual(op.signal_type, 'pt')
print(f'test signal_type.setter')
op.signal_type = 'ps'
self.assertEqual(op.signal_type, 'ps')
print(f'test error raising')
self.assertRaises(TypeError, setattr, op, 'signal_type', 123)
self.assertRaises(ValueError, setattr, op, 'signal_type', 'wrong value')
def test_property_op_data_types(self):
""" test property op_data_types"""
op = qt.Operator()
self.assertIsInstance(op.op_data_types, list)
self.assertEqual(op.op_data_types, [])
op = qt.Operator('macd, dma, trix')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
op = qt.Operator('macd, cdl')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
op.add_strategy('dma')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
def test_property_op_data_type_count(self):
""" test property op_data_type_count"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_count, int)
self.assertEqual(op.op_data_type_count, 0)
op = qt.Operator('macd, dma, trix')
dtn = op.op_data_type_count
self.assertEqual(dtn, 1)
op = qt.Operator('macd, cdl')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
op.add_strategy('dma')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
def test_property_op_data_freq(self):
""" test property op_data_freq"""
op = qt.Operator()
self.assertIsInstance(op.op_data_freq, str)
self.assertEqual(len(op.op_data_freq), 0)
self.assertEqual(op.op_data_freq, '')
op = qt.Operator('macd, dma, trix')
dtf = op.op_data_freq
self.assertIsInstance(dtf, str)
self.assertEqual(dtf[0], 'd')
op.set_parameter('macd', data_freq='m')
dtf = op.op_data_freq
self.assertIsInstance(dtf, list)
self.assertEqual(len(dtf), 2)
self.assertEqual(dtf[0], 'd')
self.assertEqual(dtf[1], 'm')
def test_property_bt_price_types(self):
""" test property bt_price_types"""
print('------test property bt_price_types-------')
op = qt.Operator()
self.assertIsInstance(op.bt_price_types, list)
self.assertEqual(len(op.bt_price_types), 0)
self.assertEqual(op.bt_price_types, [])
op = qt.Operator('macd, dma, trix')
btp = op.bt_price_types
self.assertIsInstance(btp, list)
self.assertEqual(btp[0], 'close')
op.set_parameter('macd', price_type='open')
btp = op.bt_price_types
btpc = op.bt_price_type_count
print(f'price_types are \n{btp}')
self.assertIsInstance(btp, list)
self.assertEqual(len(btp), 2)
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.add_strategies(['dma', 'macd'])
op.set_parameter('dma_1', price_type='high')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'high')
self.assertEqual(btp[2], 'open')
self.assertEqual(btpc, 3)
op.remove_strategy('dma_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.remove_strategy('macd_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
def test_property_op_data_type_list(self):
""" test property op_data_type_list"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_list, list)
self.assertEqual(len(op.op_data_type_list), 0)
self.assertEqual(op.op_data_type_list, [])
op = qt.Operator('macd, dma, trix, cdl')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(ohd[0], ['close'])
op.set_parameter('macd', data_types='open, close')
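        # note: op_data_type_list keeps one entry per strategy, in strategy order, preserving
        # each strategy's own data_types (unlike op_data_types, which is the flattened,
        # de-duplicated union), as the assertions below show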
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(len(ohd), 4)
self.assertEqual(ohd[0], ['open', 'close'])
self.assertEqual(ohd[1], ['close'])
self.assertEqual(ohd[2], ['close'])
self.assertEqual(ohd[3], ['open', 'high', 'low', 'close'])
    def test_property_op_history_data(self):
        """ Test the important property that exposes the operation history data used in
        signal generation.
        These data are stored in a dict of ndarrays, one ndarray per strategy, holding
        the history data that each strategy needs.
        """
print(f'------- Test getting operation history data ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.op_history_data, {})
self.assertEqual(op.signal_type, 'pt')
def test_property_opt_space_par(self):
""" test property opt_space_par"""
print(f'-----test property opt_space_par--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_space_par, tuple)
self.assertIsInstance(op.opt_space_par[0], list)
self.assertIsInstance(op.opt_space_par[1], list)
self.assertEqual(len(op.opt_space_par), 2)
self.assertEqual(op.opt_space_par, ([], []))
op = qt.Operator('macd, dma, trix, cdl')
osp = op.opt_space_par
print(f'before setting opt_tags opt_space_par is empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(osp[0], [])
self.assertEqual(osp[1], [])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
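        # note: only strategies whose opt_tag is non-zero contribute their parameter spaces to
        # opt_space_par; 'macd' and 'dma' apparently expose three discrete parameters each,
        # which yields the six (10, 250) ranges asserted below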
osp = op.opt_space_par
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(len(osp), 2)
self.assertIsInstance(osp[0], list)
self.assertIsInstance(osp[1], list)
self.assertEqual(len(osp[0]), 6)
self.assertEqual(len(osp[1]), 6)
self.assertEqual(osp[0], [(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
self.assertEqual(osp[1], ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
def test_property_opt_types(self):
""" test property opt_tags"""
print(f'-----test property opt_tags--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_tags, list)
self.assertEqual(len(op.opt_tags), 0)
self.assertEqual(op.opt_tags, [])
op = qt.Operator('macd, dma, trix, cdl')
otp = op.opt_tags
        print(f'before setting opt_tags, all opt_tags are zero:\n'
              f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(otp, [0, 0, 0, 0])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
otp = op.opt_tags
        print(f'after setting opt_tags, the opt_tags are updated:\n'
              f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(len(otp), 4)
self.assertEqual(otp, [1, 1, 0, 0])
def test_property_max_window_length(self):
""" test property max_window_length"""
print(f'-----test property max window length--------:\n')
op = qt.Operator()
self.assertIsInstance(op.max_window_length, int)
self.assertEqual(op.max_window_length, 0)
op = qt.Operator('macd, dma, trix, cdl')
mwl = op.max_window_length
print(f'before setting window_length the value is 270:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 270)
op.set_parameter('macd', window_length=300)
op.set_parameter('dma', window_length=350)
mwl = op.max_window_length
print(f'after setting window_length the value is new set value:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 350)
def test_property_bt_price_type_count(self):
""" test property bt_price_type_count"""
print(f'-----test property bt_price_type_count--------:\n')
op = qt.Operator()
self.assertIsInstance(op.bt_price_type_count, int)
self.assertEqual(op.bt_price_type_count, 0)
op = qt.Operator('macd, dma, trix, cdl')
otp = op.bt_price_type_count
print(f'before setting price_type the price count is 1:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 1)
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='open')
otp = op.bt_price_type_count
print(f'after setting price_type the price type count is 2:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 2)
def test_property_set(self):
""" test all property setters:
setting following properties:
- strategy_blenders
- signal_type
        other properties cannot be set"""
print(f'------- Test setting properties ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
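        # note: with no strategies in the Operator there is no bt price type to attach a
        # blender to, so strategy_blenders stays empty; the long-form name 'proportion signal'
        # is normalized to the short code 'ps', as asserted below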
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator('macd, dma, trix, cdl')
        # TODO: modify set_parameter() so that the following usage becomes valid
# a_to_sell.set_parameter('dma, cdl', price_type='open')
op.set_parameter('dma', price_type='open')
op.set_parameter('cdl', price_type='open')
sb = op.strategy_blenders
st = op.signal_type
self.assertIsInstance(sb, dict)
print(f'before setting: strategy_blenders={sb}')
self.assertEqual(sb, {})
op.strategy_blenders = '1+2 * 3'
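        # note: assigning a single blender string applies the same parsed RPN expression to
        # every bt price type ('close' and 'open' here), while assigning a list applies one
        # expression per price type, as the following assertions show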
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '*', '3', '2', '1'],
'open': ['+', '*', '3', '2', '1']})
op.strategy_blenders = ['1+2', '3-4']
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '2', '1'],
'open': ['-', '4', '3']})
def test_operator_ready(self):
"""test the method ready of Operator"""
op = qt.Operator()
print(f'operator is ready? "{op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[1], qt.SelectingAll)
self.assertIsInstance(op.strategies[2], qt.RiconUrgent)
self.assertIsInstance(op[0], qt.TimingDMA)
self.assertIsInstance(op[1], qt.SelectingAll)
self.assertIsInstance(op[2], qt.RiconUrgent)
self.assertIsInstance(op['dma'], qt.TimingDMA)
self.assertIsInstance(op['all'], qt.SelectingAll)
self.assertIsInstance(op['urgent'], qt.RiconUrgent)
self.assertEqual(op.strategy_count, 3)
print(f'test adding strategies into existing op')
print('test adding strategy by string')
op.add_strategy('macd')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingMACD)
self.assertEqual(op.strategy_count, 4)
op.add_strategy('random')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.SelectingRandom)
self.assertEqual(op.strategy_count, 5)
test_ls = TestLSStrategy()
op.add_strategy(test_ls)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], TestLSStrategy)
self.assertEqual(op.strategy_count, 6)
print(f'Test different instance of objects are added to operator')
op.add_strategy('dma')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingDMA)
self.assertIsNot(op.strategies[0], op.strategies[6])
def test_operator_add_strategies(self):
""" etst adding multiple strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertEqual(op.strategy_count, 3)
print('test adding multiple strategies -- adding strategy by list of strings')
op.add_strategies(['dma', 'macd'])
self.assertEqual(op.strategy_count, 5)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by comma separated strings')
op.add_strategies('dma, macd')
self.assertEqual(op.strategy_count, 7)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategies')
op.add_strategies([qt.TimingDMA(), qt.TimingMACD()])
self.assertEqual(op.strategy_count, 9)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[7], qt.TimingDMA)
self.assertIsInstance(op.strategies[8], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategy and str')
op.add_strategies(['DMA', qt.TimingMACD()])
self.assertEqual(op.strategy_count, 11)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[9], qt.TimingDMA)
self.assertIsInstance(op.strategies[10], qt.TimingMACD)
self.assertIsNot(op.strategies[0], op.strategies[9])
self.assertIs(type(op.strategies[0]), type(op.strategies[9]))
print('test adding fault data')
self.assertRaises(AssertionError, op.add_strategies, 123)
self.assertRaises(AssertionError, op.add_strategies, None)
    def test_operator_remove_strategy(self):
""" test method remove strategy"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.remove_strategy('dma')
self.assertEqual(op.strategy_count, 6)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'dma_1', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['dma_1'])
self.assertEqual(op.strategies[3], op['macd'])
self.assertEqual(op.strategies[4], op['dma_2'])
self.assertEqual(op.strategies[5], op['custom'])
op.remove_strategy('dma_1')
self.assertEqual(op.strategy_count, 5)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['macd'])
self.assertEqual(op.strategies[3], op['dma_2'])
self.assertEqual(op.strategies[4], op['custom'])
    def test_operator_clear_strategies(self):
""" test operator clear strategies"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op.add_strategy('dma', pars=(12, 123, 25))
self.assertEqual(op.strategy_count, 1)
self.assertEqual(op.strategy_ids, ['dma'])
self.assertEqual(type(op.strategies[0]), TimingDMA)
self.assertEqual(op.strategies[0].pars, (12, 123, 25))
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
def test_operator_prepare_data(self):
"""test processes that related to prepare data"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(strategies=[test_ls, test_sel, test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
        # calling prepare_data before all strategy parameters are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='custom',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.assertEqual(self.op.strategies[0].pars, {'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='custom_1',
pars=())
self.assertEqual(self.op.strategies[1].pars, ()),
self.op.set_parameter(stg_id='custom_2',
pars=(0.2, 0.02, -0.02))
self.assertEqual(self.op.strategies[2].pars, (0.2, 0.02, -0.02)),
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._op_history_data, dict)
self.assertEqual(len(self.op._op_history_data), 3)
# test if automatic strategy blenders are set
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '2', '+', '1', '0']})
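        # note: since no blender was set explicitly, prepare_data apparently falls back to a
        # default blender that simply sums all strategy signals, i.e. '0 + 1 + 2' in RPN form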
tim_hist_data = self.op._op_history_data['custom']
sel_hist_data = self.op._op_history_data['custom_1']
ric_hist_data = self.op._op_history_data['custom_2']
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
""" Test signal generation process of operator objects
:return:
"""
        # use the custom strategy classes defined in this test module to create the trading strategies
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sel2 = TestSelStrategyDiffTime()
test_sig = TestSigStrategy()
print('--Test PT type signal generation--')
        # Test PT type signal generation:
        # create an Operator object whose signal type is PT (proportion target)
        # the Operator contains two strategies, an LS-Strategy and a Sel-Strategy,
        # representing a timing strategy and a stock selection strategy respectively
        # each strategy generates its own PT signal, and the two are blended into one output
self.op = qt.Operator(strategies=[test_ls, test_sel])
self.op.set_parameter(stg_id='custom',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id=1,
pars=())
# self.a_to_sell.set_blender(blender='0+1+2')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test operator information in normal mode--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '1', '0']})
self.op.set_blender(None, '0*1')
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
backtest_price_types = op_list.htypes
self.assertEqual(backtest_price_types[0], 'close')
self.assertEqual(op_list.shape, (3, 45, 1))
reduced_op_list = op_list.values.squeeze().T
        print(f'op_list created, it is a 3 share/45 day/1 htype array; to make the comparison easier, \n'
              f'it will be squeezed to a 2-d array and compared share-wise:\n'
              f'{reduced_op_list}')
target_op_values = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
self.assertTrue(np.allclose(target_op_values, reduced_op_list, equal_nan=True))
print('--Test two separate signal generation for different price types--')
        # Test generation of two groups of PT signals:
        # add two more strategies to the Operator, of the same types as the existing ones but
        # with different parameters, and set their backtest price type to "open"
        # the Operator should now generate two groups of trading signals, one for the "close"
        # price type and one for the "open" price type
        # two new strategy objects must be created here, otherwise duplicate object references
        # would appear in the operator's strategies list and cause errors
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
self.op.add_strategies([test_ls, test_sel])
self.op.set_parameter(stg_id='custom_2',
price_type='open')
self.op.set_parameter(stg_id='custom_3',
price_type='open')
self.assertEqual(self.op['custom'].price_type, 'close')
self.assertEqual(self.op['custom_2'].price_type, 'open')
self.op.set_parameter(stg_id='custom_2',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id='custom_3',
pars=())
self.op.set_blender(blender='0 or 1', price_type='open')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test how operator information is printed out--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0'],
'open': ['or', '1', '0']})
        print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
signal_close = op_list['close'].squeeze().T
signal_open = op_list['open'].squeeze().T
self.assertEqual(signal_close.shape, (45, 3))
self.assertEqual(signal_open.shape, (45, 3))
target_op_close = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
target_op_open = np.array([[0.5, 0.5, 1.0],
[0.5, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 0.5, 0.0],
[1.0, 0.5, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.5, 1.0, 0.0],
[0.5, 1.0, 0.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0]])
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_close), list(signal_close))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_close, signal_close, equal_nan=True))
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_open), list(signal_open))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_open, signal_open, equal_nan=True))
print('--Test two separate signal generation for different price types--')
        # more test cases to be added
def test_stg_parameter_setting(self):
""" test setting parameters of strategies
test the method set_parameters
:return:
"""
op = qt.Operator(strategies='dma, all, urgent')
print(op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{op.strategies[0].info()}')
# TODO: allow set_parameters to a list of strategies or str-listed strategies
# TODO: allow set_parameters to all strategies of specific bt price type
print(f'Set up strategy parameters by strategy id')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
op.set_parameter('all',
window_length=20)
op.set_parameter('all', price_type='high')
print(f'Can also set up strategy parameters by strategy index')
op.set_parameter(2, price_type='open')
op.set_parameter(2,
opt_tag=1,
pars=(9, -0.09),
window_length=10)
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(op.strategies[2].pars, (9, -0.09))
self.assertEqual(op.op_data_freq, 'd')
self.assertEqual(op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.max_window_length, 20)
print(f'KeyError will be raised if wrong strategy id is given')
self.assertRaises(KeyError, op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(KeyError, op.set_parameter, stg_id='wrong_input', pars=(1, 2))
        print(f'ValueError will be raised if invalid parameter values are given')
self.assertRaises(ValueError, op.set_parameter, stg_id=0, pars=('wrong input', 'wrong input'))
# test blenders of different price types
# test setting blenders to different price types
# TODO: to allow operands like "and", "or", "not", "xor"
# a_to_sell.set_blender('close', '0 and 1 or 2')
# self.assertEqual(a_to_sell.get_blender('close'), 'str-1.2')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
op.set_blender('open', '0 & 1 | 2')
self.assertEqual(op.get_blender('open'), ['|', '2', '&', '1', '0'])
op.set_blender('high', '(0|1) & 2')
self.assertEqual(op.get_blender('high'), ['&', '2', '|', '1', '0'])
op.set_blender('close', '0 & 1 | 2')
self.assertEqual(op.get_blender(), {'close': ['|', '2', '&', '1', '0'],
'high': ['&', '2', '|', '1', '0'],
'open': ['|', '2', '&', '1', '0']})
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.opt_tags, [1, 0, 1])
def test_signal_blend(self):
self.assertEqual(blender_parser('0 & 1'), ['&', '1', '0'])
self.assertEqual(blender_parser('0 or 1'), ['or', '1', '0'])
self.assertEqual(blender_parser('0 & 1 | 2'), ['|', '2', '&', '1', '0'])
blender = blender_parser('0 & 1 | 2')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 1)
self.assertEqual(signal_blend([0, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '0 & ( 1 | 2 )'
self.assertEqual(blender_parser('0 & ( 1 | 2 )'), ['&', '|', '2', '1', '0'])
blender = blender_parser('0 & ( 1 | 2 )')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 0)
self.assertEqual(signal_blend([0, 0, 1], blender), 0)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '(1-2)/3 + 0'
self.assertEqual(blender_parser('(1-2)/3 + 0'), ['+', '0', '/', '3', '-', '2', '1'])
blender = blender_parser('(1-2)/3 + 0')
self.assertEqual(signal_blend([5, 9, 1, 4], blender), 7)
        # parse: '(0*1/2*(3+4))+5*(6+7)-8'
self.assertEqual(blender_parser('(0*1/2*(3+4))+5*(6+7)-8'), ['-', '8', '+', '*', '+', '7', '6', '5', '*',
'+', '4', '3', '/', '2', '*', '1', '0'])
blender = blender_parser('(0*1/2*(3+4))+5*(6+7)-8')
self.assertEqual(signal_blend([1, 1, 1, 1, 1, 1, 1, 1, 1], blender), 3)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 14)
# parse: '0/max(2,1,3 + 5)+4'
self.assertEqual(blender_parser('0/max(2,1,3 + 5)+4'), ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
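        # worked example: the token 'max(3)' denotes a max over three operands; with signals
        # [8.0, 4, 3, 5.0, 0.125, 5] the expression is s0 / max(s2, s1, s3 + s5) + s4
        # = 8.0 / max(3, 4, 10.0) + 0.125 = 0.925, matching the first assertion below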
self.assertEqual(signal_blend([8.0, 4, 3, 5.0, 0.125, 5], blender), 0.925)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 5.25)
print('speed test')
import time
st = time.time()
blender = blender_parser('0+max(1,2,(3+4)*5, max(6, (7+8)*9), 10-11) * (12+13)')
res = []
for i in range(10000):
res = signal_blend([1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 7, 8, 2, 3], blender)
et = time.time()
print(f'total time for RPN processing: {et - st}, got result: {res}')
blender = blender_parser("0 + 1 * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 7)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0+1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
        # TODO: expressions with a leading unary minus, such as -(1+2), can not be handled yet
# self.a_to_sell.set_blender('selecting', "-(0 + 1) * 2")
# self.assertEqual(self.a_to_sell.signal_blend([1, 2, 3]), -9)
blender = blender_parser("(0-1)/2 + 3")
print(f'RPN of notation: "(0-1)/2 + 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 2, 3, 0.0], blender), -0.33333333)
blender = blender_parser("0 + 1 / 2")
print(f'RPN of notation: "0 + 1 / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, math.pi, 4], blender), 1.78539816)
blender = blender_parser("(0 + 1) / 2")
print(f'RPN of notation: "(0 + 1) / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 2, 3], blender), 1)
blender = blender_parser("(0 + 1 * 2) / 3")
print(f'RPN of notation: "(0 + 1 * 2) / 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([3, math.e, 10, 10], blender), 3.0182818284590454)
blender = blender_parser("0 / 1 * 2")
print(f'RPN of notation: "0 / 1 * 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 3, 6], blender), 2)
blender = blender_parser("(0 - 1 + 2) * 4")
print(f'RPN of notation: "(0 - 1 + 2) * 4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 1, -1, np.nan, math.pi], blender), -3.141592653589793)
blender = blender_parser("0 * 1")
print(f'RPN of notation: "0 * 1" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([math.pi, math.e], blender), 8.539734222673566)
blender = blender_parser('abs(3-sqrt(2) / cos(1))')
print(f'RPN of notation: "abs(3-sqrt(2) / cos(1))" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['abs(1)', '-', '/', 'cos(1)', '1', 'sqrt(1)', '2', '3'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
print(f'RPN of notation: "0/max(2,1,3 + 5)+4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('1 + sum(1,2,3+3, sum(1, 2) + 3) *5')
print(f'RPN of notation: "1 + sum(1,2,3+3, sum(1, 2) + 3) *5" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '5', 'sum(4)', '+', '3', 'sum(2)', '2', '1',
'+', '3', '3', '2', '1', '1'])
blender = blender_parser('1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)')
print(f'RPN of notation: "1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '+', '3', '2', 'sum(5)', '-', '8', '7',
'sum(2)', '*', '6', '+', '5', '4', '3', '*', '4',
'+', '5', '3', '2', '1', '1'])
# TODO: ndarray type of signals to be tested:
def test_set_opt_par(self):
""" test setting opt pars in batch"""
print(f'--------- Testing setting Opt Pars: set_opt_par -------')
op = qt.Operator('dma, random, crossline')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
self.assertEqual(op.opt_tags, [1, 0, 0])
op.set_opt_par((5, 12, 9))
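        # note: set_opt_par distributes the given values, in order, over the parameters of the
        # strategies whose opt_tag is non-zero; here only 'dma' (opt_tag=1) is optimizable, so
        # the whole tuple goes to it and the other strategies keep their current parameters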
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 1])
op.set_opt_par((5, 12, 9, 8, 26, 9, 'buy'))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
op.set_opt_par((9, 200, 155, 8, 26, 9, 'buy', 5, 12, 9))
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=2,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 2])
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (5, 10, 5, 'sell'))
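        # note: with opt_tag=2 a strategy's parameter set is treated as one enumerable item,
        # so set_opt_par expects a single nested tuple for it instead of individual values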
op.set_opt_par((5, 12, 9, (8, 26, 9, 'buy')))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# Test Errors
# Not enough values for parameter
op.set_parameter('crossline', opt_tag=1)
self.assertRaises(ValueError, op.set_opt_par, (5, 12, 9, 8))
# wrong type of input
self.assertRaises(AssertionError, op.set_opt_par, [5, 12, 9, 7, 15, 12, 'sell'])
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'R-TIMING'
self.stg_name = "CROSSLINE"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
        # TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask,
        #  which may have unexpected consequences
        # TODO: the handling of nan values needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
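        # note: _seg_periods splits the history dates into sampling segments according to the
        # strategy's sample_freq and returns the segment start positions, segment lengths and
        # the segment count, which the assertions below verify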
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
        # test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
        # test single factor, get max factor with proportion weighting
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
        # test single factor, get max factor with even weighting, threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
def test_tokenizer(self):
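        # note: _exp_to_token splits a blender expression into tokens; a function name keeps its
        # opening parenthesis as part of the token (e.g. 'max(', 'sqrt('), and unary signs are
        # attached to the following number (e.g. '-1', '-.1'), as the cases below demonstrate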
self.assertListEqual(_exp_to_token('1+1'),
['1', '+', '1'])
print(_exp_to_token('1+1'))
self.assertListEqual(_exp_to_token('1 & 1'),
['1', '&', '1'])
print(_exp_to_token('1&1'))
self.assertListEqual(_exp_to_token('1 and 1'),
['1', 'and', '1'])
print(_exp_to_token('1 and 1'))
self.assertListEqual(_exp_to_token('1 or 1'),
['1', 'or', '1'])
print(_exp_to_token('1 or 1'))
self.assertListEqual(_exp_to_token('(1 - 1 + -1) * pi'),
['(', '1', '-', '1', '+', '-1', ')', '*', 'pi'])
print(_exp_to_token('(1 - 1 + -1) * pi'))
self.assertListEqual(_exp_to_token('abs(5-sqrt(2) / cos(pi))'),
['abs(', '5', '-', 'sqrt(', '2', ')', '/', 'cos(', 'pi', ')', ')'])
print(_exp_to_token('abs(5-sqrt(2) / cos(pi))'))
self.assertListEqual(_exp_to_token('sin(pi) + 2.14'),
['sin(', 'pi', ')', '+', '2.14'])
print(_exp_to_token('sin(pi) + 2.14'))
self.assertListEqual(_exp_to_token('(1-2)/3.0 + 0.0000'),
['(', '1', '-', '2', ')', '/', '3.0', '+', '0.0000'])
print(_exp_to_token('(1-2)/3.0 + 0.0000'))
self.assertListEqual(_exp_to_token('-(1. + .2) * max(1, 3, 5)'),
['-', '(', '1.', '+', '.2', ')', '*', 'max(', '1', ',', '3', ',', '5', ')'])
print(_exp_to_token('-(1. + .2) * max(1, 3, 5)'))
self.assertListEqual(_exp_to_token('(x + e * 10) / 10'),
['(', 'x', '+', 'e', '*', '10', ')', '/', '10'])
print(_exp_to_token('(x + e * 10) / 10'))
self.assertListEqual(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'),
['8.2', '/', '(', '(', '-.1', '+', 'abs3(', '3', ',', '4', ',', '5', ')', ')', '*', '0.12',
')'])
print(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'))
self.assertListEqual(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'),
['8.2', '/', 'abs3(', '3', ',', '4', ',', '25.34', '+', '5', ')', '*', '0.12'])
print(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
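        # the HistoryPanels below cover different construction paths: hp uses full 3-D data with
        # all labels given; hp2 uses 2-D data with a single htype; hp3 uses 2-D data with a
        # single share; hp4 uses 1-D data with one share and one htype; hp5 and hp6 rely on
        # default labels for the axes that are not specified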
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
# HistoryPanel should be empty if no value is given
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
# HistoryPanel should also be empty if empty value (np.array([])) is given
empty_hp = qt.HistoryPanel(np.empty((5, 0, 4)), levels=self.shares, columns=self.htypes)
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
print('test creating HistoryPanel with very limited data')
print('test creating HistoryPanel with 2D data')
temp_data = np.random.randint(10, size=(7, 3)).astype('float')
temp_hp = qt.HistoryPanel(temp_data)
        # Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
        # values is not np.ndarray
self.assertRaises(TypeError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
        # label value not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
print('==========================\nOutput all history data of the close type\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
print(f'==========================\nOutput all history data of the close and open types\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
print(f'==========================\nOutput history data of all types for the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
print('==========================\nOutput all history data of all shares for htypes 0, 1 and 2\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
print('==========================\nOutput all history data of the close and high types\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
print('==========================\nOutput all history data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
print('==========================\nOutput all history data of the close and high types\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
print('==========================\nOutput all history data of the three types from close through high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
print('==========================\nOutput all history data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
print('==========================\nOutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
print('==========================\nOutput history data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
print('==========================\nOutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
print('==========================\nOutput history data of days 0-7 for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
print('==========================\nOutput history data of days 0-7 for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
print('==========================\nOutput close and open history data of shares 000100 and 000102\n',
hp['close,open', ['000100', '000102']])
print('==========================\nOutput close and open history data of shares 000100 and 000102\n',
hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
def test_segment(self):
"""测试历史数据片段的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test segment with None parameters')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20150202')
seg3 = test_hp.segment(end_date='20201010')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp.values
))
self.assertTrue(np.allclose(
seg2.values, test_hp.values
))
self.assertTrue(np.allclose(
seg3.values, test_hp.values
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates)
self.assertEqual(seg3.hdates, test_hp.hdates)
print(f'Test segment with proper dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160704')
seg3 = test_hp.segment(start_date='2016-07-05',
end_date='20160708')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 2:6]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[2:6])
print(f'Test segment with non-existing but in range dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160703')
seg3 = test_hp.segment(start_date='2016-07-03',
end_date='20160710')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 1:6]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[1:6])
print(f'Test segment with out-of-range dates')
seg1 = test_hp.segment(start_date='2016-05-03',
end_date='20160910')
self.assertIsInstance(seg1, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
def test_slice(self):
"""测试历史数据切片的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test slice with shares')
share = '000101'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101']))
share = '000101, 000103'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101', '000103'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101, 000103']))
print(f'Test slice with htypes')
htype = 'open'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open']))
htype = 'open, close'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open, close']))
# test that slicing of "open, close" does NOT equal to "close, open"
self.assertFalse(np.allclose(slc.values, test_hp['close, open']))
print(f'Test slicing with both htypes and shares')
share = '000103, 000101'
htype = 'high, low, close'
slc = test_hp.slice(shares=share, htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000103', '000101'])
self.assertEqual(slc.htypes, ['high', 'low', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['high, low, close', '000103, 000101']))
print(f'Test Error cases')
# duplicated input
htype = 'open, close, open'
self.assertRaises(AssertionError, test_hp.slice, htypes=htype)
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
print(f'test errors raising')
temp_hp = self.hp.copy()
self.assertRaises(AssertionError, temp_hp.re_label, htypes=new_shares_str)
self.assertRaises(TypeError, temp_hp.re_label, htypes=123)
self.assertRaises(AssertionError, temp_hp.re_label, htypes='wrong input!')
def test_csv_to_hp(self):
pass
def test_hdf_to_hp(self):
pass
def test_hp_join(self):
# TODO: This needs strengthening: use concrete examples to confirm that the result of hp_join is correct,
# TODO: in particular whether different shares, htypes and hdates, and different
# TODO: orderings of them, are combined correctly
print(f'join two simple HistoryPanels with same shares')
temp_hp = self.hp.join(self.hp2, same_shares=True)
self.assertIsInstance(temp_hp, qt.HistoryPanel)
def test_df_to_hp(self):
print(f'test converting DataFrame to HistoryPanel')
data = np.random.randint(10, size=(10, 5))
df1 = pd.DataFrame(data)
df2 = pd.DataFrame(data, columns=str_to_list(self.shares))
df3 = pd.DataFrame(data[:, 0:4])
df4 = pd.DataFrame(data[:, 0:4], columns=str_to_list(self.htypes))
hp = qt.dataframe_to_hp(df1, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, [0, 1, 2, 3, 4])
self.assertEqual(hp.htypes, ['close'])
self.assertEqual(hp.hdates, [pd.Timestamp('1970-01-01 00:00:00'),
pd.Timestamp('1970-01-01 00:00:00.000000001'),
pd.Timestamp('1970-01-01 00:00:00.000000002'),
pd.Timestamp('1970-01-01 00:00:00.000000003'),
pd.Timestamp('1970-01-01 00:00:00.000000004'),
pd.Timestamp('1970-01-01 00:00:00.000000005'),
|
pd.Timestamp('1970-01-01 00:00:00.000000006')
|
pandas.Timestamp
|
import pandas as pd
import numpy as np
import json
from tqdm import tqdm
from scipy.optimize import minimize
from utils import get_next_gw, time_decay
from ranked_probability_score import ranked_probability_score, match_outcome
class Bradley_Terry:
""" Model game outcomes using logistic distribution """
def __init__(
self,
games,
threshold=0.1,
scale=1,
parameters=None,
decay=True):
"""
Args:
games (pd.DataFrame): Finished games used for training.
threshold (float): Threshold to differentiate team performances
scale (float): Variance of strength ratings
parameters (array): Initial parameters to use
decay (boolean): Apply time decay
"""
self.games = games.loc[:, [
"score1", "score2", "team1", "team2", "date"]]
self.games = self.games.dropna()
self.games["date"] = pd.to_datetime(self.games["date"])
self.games["days_since"] = (
self.games["date"].max() - self.games["date"]).dt.days
self.games["weight"] = (
time_decay(0.0026, self.games["days_since"]) if decay else 1)
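# Note: time_decay comes from utils and is not shown here; it presumably maps
# days_since to a weight that shrinks for older games (e.g. an exponential
# decay with rate 0.0026), so recent results carry more weight in the fit.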
self.decay = decay
self.games["score1"] = self.games["score1"].astype(int)
self.games["score2"] = self.games["score2"].astype(int)
self.teams = np.sort(np.unique(self.games["team1"]))
self.league_size = len(self.teams)
self.threshold = threshold
self.scale = scale
# Initial parameters
if parameters is None:
self.parameters = np.concatenate((
np.random.uniform(0, 1, (self.league_size)), # Strength
[.1], # Home advantage
))
else:
self.parameters = parameters
def likelihood(self, parameters, games):
""" Perform sample prediction and compare with outcome
Args:
parameters (array): Current estimate of the parameters
games (pd.DataFrame): Fixtures
Returns:
(float): Likelihood of the estimated parameters
"""
parameter_df = (
pd.DataFrame()
.assign(rating=parameters[:self.league_size])
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(
games,
parameter_df,
left_on='team1',
right_on='team')
.rename(columns={"rating": "rating1"})
.merge(parameter_df, left_on='team2', right_on='team')
.rename(columns={"rating": "rating2"})
.drop("team_y", axis=1)
.drop("team_x", axis=1)
)
outcome = match_outcome(fixtures_df)
outcome_ma = np.ones((fixtures_df.shape[0], 3))
outcome_ma[np.arange(0, fixtures_df.shape[0]), outcome] = 0
odds = np.zeros((fixtures_df.shape[0], 3))
odds[:, 0] = (
1 / (1 + np.exp(
-(
fixtures_df["rating1"] + parameters[-1] -
fixtures_df["rating2"] - self.threshold
) / self.scale)
)
)
odds[:, 2] = (
1 / (1 + np.exp(
-(
fixtures_df["rating2"] - parameters[-1] -
fixtures_df["rating1"] - self.threshold
) / self.scale)
)
)
odds[:, 1] = 1 - odds[:, 0] - odds[:, 2]
return - np.power(
np.ma.masked_array(odds, outcome_ma),
np.repeat(
np.array(fixtures_df["weight"].values).reshape(-1, 1),
3,
axis=1)
).sum()
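# Worked example of the win/draw/loss probabilities computed above (purely
# illustrative numbers, not taken from the data): with rating1=1.2, rating2=1.0,
# home_adv=0.1, threshold=0.1 and scale=1,
#   p_home = 1 / (1 + exp(-(1.2 + 0.1 - 1.0 - 0.1))) ~= 0.55
#   p_away = 1 / (1 + exp(-(1.0 - 0.1 - 1.2 - 0.1))) ~= 0.40
#   p_draw = 1 - p_home - p_away                     ~= 0.05
# The threshold widens the draw band, while scale controls how quickly the
# probabilities saturate as the rating gap grows.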
def maximum_likelihood_estimation(self):
"""
Maximum likelihood estimation of the model parameters for team
strengths and the home field advantage.
"""
# Constrain strength ratings to sum to the league size (mean rating of 1) so the fit is identifiable and reproducible
constraints = [{
"type": "eq",
"fun": lambda x:
sum(x[: self.league_size]) - self.league_size
}]
# Set the maximum and minimum values the parameters can take
bounds = [(0, 3)] * self.league_size
bounds += [(0, 1)]
self.solution = minimize(
self.likelihood,
self.parameters,
args=self.games,
constraints=constraints,
bounds=bounds,
options={'disp': False, 'maxiter': 100})
self.parameters = self.solution["x"]
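# End-to-end usage sketch (assuming `games` and `fixtures` are DataFrames with
# the columns used above, i.e. score1, score2, team1, team2 and date):
#   model = Bradley_Terry(games)
#   model.maximum_likelihood_estimation()
#   predictions = model.predict(fixtures)  # appends home_win_p, draw_p, away_win_p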
def predict(self, games):
""" Predict score for several fixtures
Args:
games (pd.DataFrame): Fixtures
Returns:
pd.DataFrame: Fixtures with appended odds
"""
parameter_df = (
pd.DataFrame()
.assign(rating=self.parameters[:self.league_size])
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='team1', right_on='team')
.rename(columns={"rating": "rating1"})
.merge(parameter_df, left_on='team2', right_on='team')
.rename(columns={"rating": "rating2"})
.drop("team_y", axis=1)
.drop("team_x", axis=1)
.assign(home_adv=self.parameters[-1])
)
def synthesize_odds(row):
""" Lambda function that parses row by row to compute score matrix
Args:
row (array): Fixture
Returns:
(tuple): Home win, draw and away win probabilities
"""
home_win_p = (
1 / (
1 + np.exp(
-(
row["rating1"] + row["home_adv"] -
row["rating2"] - self.threshold) / self.scale
)
)
)
away_win_p = (
1 / (
1 + np.exp(
-(
row["rating2"] - row["home_adv"] -
row["rating1"] - self.threshold) / self.scale
)
)
)
draw_p = 1 - home_win_p - away_win_p
return home_win_p, draw_p, away_win_p
(
fixtures_df["home_win_p"],
fixtures_df["draw_p"],
fixtures_df["away_win_p"]
) = zip(*fixtures_df.apply(
lambda row: synthesize_odds(row), axis=1))
return fixtures_df
def evaluate(self, games):
""" Evaluate the model's prediction accuracy
Args:
games (pd.DataFrame): Fixtures to evaluate on
Returns:
pd.DataFrame: df with appended metrics
"""
fixtures_df = self.predict(games)
fixtures_df["winner"] = match_outcome(fixtures_df)
fixtures_df["rps"] = fixtures_df.apply(
lambda row: ranked_probability_score(
[row["home_win_p"], row["draw_p"],
row["away_win_p"]], row["winner"]), axis=1)
return fixtures_df
def backtest(
self,
train_games,
test_season,
path='',
cold_start=False,
save=True):
""" Test the model's accuracy on past/finished games by iteratively
training and testing on parts of the data.
Args:
train_games (pd.DataFrame): All the training samples
test_season (int): Season to use as the test set
path (string): Path extension to adjust to ipynb use
cold_start (boolean): Retrain from fresh random parameters instead of warm-starting from the previous fit
save (boolean): Save predictions to disk
Returns:
(float): Evaluation metric
"""
# Get training data
self.train_games = train_games
# Initialize model
self.__init__(self.train_games[
self.train_games['season'] != test_season],
decay=self.decay)
# Initial train on past seasons
self.maximum_likelihood_estimation()
# Get test data
# Separate testing based on per GW intervals
fixtures = (
pd.read_csv(
f"{path}data/fpl_official/vaastav/data/2021-22/fixtures.csv")
.loc[:, ['event', 'kickoff_time']])
fixtures["kickoff_time"] = (
pd.to_datetime(fixtures["kickoff_time"]).dt.date)
# Get only EPL games from the test season
self.test_games = (
self.train_games
.loc[self.train_games['league_id'] == 2411]
.loc[self.train_games['season'] == test_season]
.dropna()
)
self.test_games["kickoff_time"] = (
pd.to_datetime(self.test_games["date"]).dt.date)
# Merge on date
self.test_games = pd.merge(
self.test_games,
fixtures,
left_on='kickoff_time',
right_on='kickoff_time')
# Add the home team and away team index for running inference
idx = (
pd.DataFrame()
.assign(team=self.teams)
.assign(team_index=np.arange(self.league_size)))
self.test_games = (
pd.merge(self.test_games, idx, left_on="team1", right_on="team")
.rename(columns={"team_index": "hg"})
.drop(["team"], axis=1)
.drop_duplicates()
.merge(idx, left_on="team2", right_on="team")
.rename(columns={"team_index": "ag"})
.drop(["team"], axis=1)
.sort_values("date")
)
predictions = pd.DataFrame()
for gw in tqdm(range(1, 39)):
# For each GW of the season
if gw in self.test_games['event'].values:
# Handle case when the season is not finished
# Run inference on the specific GW and save data.
predictions = pd.concat([
predictions,
self.evaluate(
self.test_games[self.test_games['event'] == gw])
])
if cold_start:
previous_parameters = None
else:
previous_parameters = self.parameters
# Retrain model with the new GW added to the train set.
self.__init__(
pd.concat([
self.train_games[
self.train_games['season'] != test_season],
self.test_games[self.test_games['event'] <= gw]
])
.drop(columns=['ag', 'hg']),
parameters=previous_parameters,
decay=self.decay)
self.maximum_likelihood_estimation()
if save:
(
predictions
.loc[:, [
'date', 'team1', 'team2', 'event', 'hg', 'ag',
'rating1', 'rating2', 'home_adv',
'home_win_p', 'draw_p', 'away_win_p']]
.to_csv(
f"{path}data/predictions/fixtures/bradley_terry" +
f"{'' if self.decay else '_no_decay'}.csv",
index=False)
)
return predictions
if __name__ == "__main__":
with open('info.json') as f:
season = json.load(f)['season']
next_gw = get_next_gw()
df = pd.read_csv("data/fivethirtyeight/spi_matches.csv")
df = (
df
.loc[(df['league_id'] == 2411) | (df['league_id'] == 2412)]
)
# Get GW dates
fixtures = (
pd.read_csv("data/fpl_official/vaastav/data/2021-22/fixtures.csv")
.loc[:, ['event', 'kickoff_time']])
fixtures["kickoff_time"] =
|
pd.to_datetime(fixtures["kickoff_time"])
|
pandas.to_datetime
|
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score, plot_roc_curve, roc_curve,confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import KFold
import mlflow
import mlflow.sklearn
print("Numpy: {}".format(np.__version__))
print("Pandas: {}".format(pd.__version__))
print("matplotlib: {}".format(matplotlib.__version__))
print("seaborn: {}".format(sns.__version__))
print("Scikit-Learn: {}".format(sklearn.__version__))
print("MLFlow: {}".format(mlflow.__version__))
data_path = "creditcard.csv"
df = pd.read_csv(data_path)
df = df.drop("Time", axis=1)
print(df.head())
normal = df[df.Class == 0].sample(frac=0.5, random_state=2020).reset_index(drop=True)
anomaly = df[df.Class == 1]
print(f"Normal: {normal.shape}")
print(f"Anomaly: {anomaly.shape}")
normal_train, normal_test = train_test_split(normal, test_size=0.2, random_state=2020)
anomaly_train, anomaly_test = train_test_split(anomaly, test_size=0.2, random_state=2020)
normal_train, normal_validate = train_test_split(normal_train, test_size=0.25, random_state=2020)
anomaly_train, anomaly_validate = train_test_split(anomaly_train, test_size=0.25, random_state=2020)
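# The two-step split above yields roughly a 60/20/20 train/validation/test split
# per class: 0.8 * 0.75 = 0.6 of each class stays in train and 0.8 * 0.25 = 0.2
# goes to validation, matching the 0.2 held out for test in the first split.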
x_train = pd.concat((normal_train, anomaly_train))
x_test =
|
pd.concat((normal_test, anomaly_test))
|
pandas.concat
|
"""
Import as:
import core.test.test_statistics as cttsta
"""
import logging
from typing import List
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as casgen
import core.finance as cfinan
import core.signal_processing as csproc
import core.statistics as cstati
import helpers.printing as hprint
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestComputeMoments(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_moments(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_moments(series, prefix="moments_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test3(self) -> None:
series = pd.Series([])
cstati.compute_moments(series)
def test4(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.compute_moments(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.compute_moments(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test6(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.compute_moments(series)
def test7(self) -> None:
"""
Test series with `inf`.
"""
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[4] = np.inf
actual = cstati.compute_moments(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
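# The series below is 40 monthly observations (starting 2010-01) drawn from an
# ARMA(2, 2) process with the AR/MA coefficients defined above; fixing the seed
# keeps the generated series, and hence the golden test outputs, reproducible.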
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestComputeFracZero(hut.TestCase):
def test1(self) -> None:
data = [0.466667, 0.2, 0.13333, 0.2, 0.33333]
index = [0, 1, 2, 3, 4]
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_zero(self._get_df(seed=1))
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test2(self) -> None:
data = [
0.4,
0.0,
0.2,
0.4,
0.4,
0.2,
0.4,
0.0,
0.6,
0.4,
0.6,
0.2,
0.0,
0.0,
0.2,
]
index = pd.date_range(start="1-04-2018", periods=15, freq="30T")
expected =
|
pd.Series(data=data, index=index)
|
pandas.Series
|
"""Tests relating to the ModelIsotherm class."""
import pandas
import pytest
from matplotlib.testing.decorators import cleanup
from pandas.testing import assert_series_equal
import pygaps
import pygaps.modelling as pgm
import pygaps.parsing as pgp
import pygaps.utilities.exceptions as pgEx
from ..characterisation.conftest import DATA
from ..characterisation.conftest import DATA_N77_PATH
from .conftest import LOADING_AT_PARAM
from .conftest import LOADING_PARAM
from .conftest import PRESSURE_AT_PARAM
from .conftest import PRESSURE_PARAM
@pytest.mark.core
class TestModelConvenience():
"""Test the convenience model function."""
def test_model_isotherm(self, basic_pointisotherm):
pgm.model_iso(basic_pointisotherm, model="Henry")
@pytest.mark.core
class TestModelIsotherm():
"""Test the ModelIsotherm class."""
def test_isotherm_create(self):
"""Check isotherm can be created from basic data."""
isotherm_param = {
'loading_key': 'loading',
'pressure_key': 'pressure',
'material': 'carbon',
'adsorbate': 'nitrogen',
'temperature': 77,
}
pressure = [1, 2, 3, 4, 5, 3, 2]
loading = [1, 2, 3, 4, 5, 3, 2]
isotherm_data = pandas.DataFrame({
'pressure': pressure,
'loading': loading
})
# regular creation
pygaps.ModelIsotherm(
pressure=pressure,
loading=loading,
model='Henry',
**isotherm_param
)
# regular creation, DataFrame
pygaps.ModelIsotherm(
isotherm_data=isotherm_data, model='Henry', **isotherm_param
)
# regular creation, desorption
pygaps.ModelIsotherm(
isotherm_data=isotherm_data,
model='Henry',
branch='des',
**isotherm_param
)
# regular creation, guessed parameters
pygaps.ModelIsotherm(
isotherm_data=isotherm_data,
model='Henry',
param_guess={'K': 1.0},
**isotherm_param
)
# regular creation, with bounds
pygaps.ModelIsotherm(
isotherm_data=isotherm_data,
model='Henry',
param_guess={'K': 1.0},
param_bounds={'K': [0, 100]},
**isotherm_param
)
# Missing pressure/loading
with pytest.raises(pgEx.ParameterError):
pygaps.ModelIsotherm(
pressure=pressure, loading=None, **isotherm_param
)
# Missing model
with pytest.raises(pgEx.ParameterError):
pygaps.ModelIsotherm(isotherm_data=isotherm_data, **isotherm_param)
# Wrong model
with pytest.raises(pgEx.ParameterError):
pygaps.ModelIsotherm(
isotherm_data=isotherm_data, model='Wrong', **isotherm_param
)
# Wrong branch
with pytest.raises(pgEx.ParameterError):
pygaps.ModelIsotherm(
isotherm_data=isotherm_data,
model='Henry',
branch='random',
)
# Wrong parameters
with pytest.raises(pgEx.ParameterError):
pygaps.ModelIsotherm(
isotherm_data=isotherm_data,
model='Henry',
param_guess={'K9': 'woof'},
**isotherm_param
)
def test_isotherm_create_from_model(self, basic_isotherm):
"""Check isotherm can be created from a model."""
model = pygaps.modelling.get_isotherm_model('Henry')
pygaps.ModelIsotherm(
model=model,
material='Test',
temperature=303,
adsorbate='nitrogen'
)
def test_isotherm_create_from_isotherm(self, basic_isotherm):
"""Check isotherm can be created from Isotherm."""
pygaps.ModelIsotherm.from_isotherm(
basic_isotherm,
isotherm_data=pandas.DataFrame({
'pressure': [1, 2, 3, 4, 5, 3, 2],
'loading': [1, 2, 3, 4, 5, 3, 2]
}),
pressure_key='pressure',
loading_key='loading',
model='Henry',
)
def test_isotherm_create_from_pointisotherm(self, basic_pointisotherm):
"""Check isotherm can be created from PointIsotherm."""
with pytest.raises(pgEx.ParameterError):
pygaps.ModelIsotherm.from_pointisotherm(basic_pointisotherm)
pygaps.ModelIsotherm.from_pointisotherm(
basic_pointisotherm,
model='Henry',
)
@cleanup
@pytest.mark.parametrize(
'file, ', [(data['file']) for data in list(DATA.values())]
)
def test_isotherm_create_guess(self, file):
"""Check isotherm can be guessed from PointIsotherm."""
filepath = DATA_N77_PATH / file
isotherm = pgp.isotherm_from_json(filepath)
pygaps.ModelIsotherm.from_pointisotherm(
isotherm, model='guess', verbose=True
)
pygaps.ModelIsotherm.from_pointisotherm(
isotherm, model=['Henry', 'Langmuir'], verbose=True
)
with pytest.raises(pgEx.ParameterError):
pygaps.ModelIsotherm.from_pointisotherm(
isotherm, model=['Henry', 'DummyModel'], verbose=True
)
##########################
@pytest.mark.parametrize(
'expected, parameters',
[
pytest.param(1, {'branch': 'des'},
marks=pytest.mark.xfail), # Wrong branch
] + PRESSURE_PARAM
)
def test_isotherm_ret_pressure(
self,
use_adsorbate,
basic_modelisotherm,
expected,
parameters,
):
"""Check that all the functions in ModelIsotherm return their specified parameter."""
assert basic_modelisotherm.pressure(
6,
**parameters,
)[0] == pytest.approx(expected, 1e-5)
def test_isotherm_ret_pressure_indexed(
self,
basic_modelisotherm,
):
"""Indexed option specified."""
assert basic_modelisotherm.pressure(5, indexed=True).equals(
pandas.Series([1.0, 2.25, 3.5, 4.75, 6.0], name='loading')
)
@pytest.mark.parametrize(
'expected, parameters',
[
pytest.param(1, {'branch': 'des'},
marks=pytest.mark.xfail), # Wrong branch
] + LOADING_PARAM
)
def test_isotherm_ret_loading(
self,
use_material,
use_adsorbate,
basic_modelisotherm,
expected,
parameters,
):
"""Check that all the functions in ModelIsotherm return their specified parameter."""
assert basic_modelisotherm.loading(
6,
**parameters,
)[0] == pytest.approx(expected, 1e-5)
def test_isotherm_ret_loading_indexed(
self,
basic_modelisotherm,
):
"""Indexed option specified."""
assert_series_equal(
basic_modelisotherm.loading(5, indexed=True),
|
pandas.Series([1.0, 2.25, 3.5, 4.75, 6.0])
|
pandas.Series
|
from numpy.testing import assert_equal, assert_allclose
from pandas.testing import assert_frame_equal
import numpy as np
import pandas as pd
from sciparse import find_lcr_dataline, parse_lcr_header, parse_lcr
from sciparse import convert_lcr_to_standard
import pytest
import os
@pytest.fixture
def filename():
dir_name = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dir_name, 'data/lcr_test_data.dat')
filename = str(filename)
return filename
@pytest.fixture
def metadata(filename):
metadata = parse_lcr_header(filename)
return metadata
@pytest.fixture
def data(filename):
data, metadata = parse_lcr(filename)
return data
def test_extract_header(metadata):
desiredMode = "SWEEP"
actualMode = metadata['mode']
assert_equal(actualMode, desiredMode)
desiredStartVoltage = 10
actualStartVoltage = metadata['start_voltage']
assert_equal(actualStartVoltage, desiredStartVoltage, err_msg="stop voltage")
desiredStopVoltage = -20
actualStopVoltage = metadata['stop_voltage']
assert_equal(actualStopVoltage, desiredStopVoltage, err_msg="start voltage")
desiredStepVoltage = -0.25
actualStepVoltage = metadata['step_voltage']
assert_equal(actualStepVoltage, desiredStepVoltage, err_msg="step voltage")
desiredPoints = 121
actualPoints = metadata['n_samples']
assert_equal(actualPoints, desiredPoints, err_msg="number points")
def test_find_datalines(filename):
desiredStartLine = 28
actualStartLine = find_lcr_dataline(filename)
assert_equal(actualStartLine, desiredStartLine)
def test_parse_data_header(data):
# Confirm we got the right data types
actualDataTypes = data.columns.values
desiredDataTypes = ['Z', 'THETA', 'BIAS', 'VM', 'IM']
assert_equal(actualDataTypes, desiredDataTypes)
def test_parse_data_length(data):
# Confirm we got the right length of data
desired_data_points = 121
actual_data_points = len(data)
assert_equal(actual_data_points, desired_data_points)
def test_parse_data(data):
desiredZData = 5.57723*1e6
actualZData = data['Z'].iloc[1]
assert_allclose(desiredZData, actualZData)
desiredBIASData = 8.5
actualBIASData = data['BIAS'].iloc[6]
assert_allclose(desiredBIASData, actualBIASData)
def test_convert_data_CP_RP():
frequency = 1 / (2*np.pi) * 1000 # 1krad/s
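# f = omega / (2 * pi), so this frequency corresponds to an angular frequency of
# omega = 2 * pi * f = 1000 rad/s, which is what the "1krad/s" comment refers to.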
test_metadata = {'frequency': frequency}
test_data =
|
pd.DataFrame({'CP': [1e-9], 'RP': 1e6})
|
pandas.DataFrame
|
import pandas as pd
course_sales = { 'course': ['Python', 'Ruby', 'Excel', 'C++'],
'day':['Mon', 'Tue', 'Wed', 'Fri' ],
'price': [5, 10, 15, 20],
'sale': [2,3,4,5]
}
#print(course_sales)
# df_sales = pd.DataFrame(course_sales)
# print(df_sales)
# Create individual lists from sales data
course = ['Python','Ruby','Excel','C++']
day = ['Mon', 'Tue', 'Tue', 'Wed']
price = [5,10,15,20]
sale = [2,3,5,7]
labels = ['Course', 'Day', 'Price', 'No. of Sales']
cols = [ course, day, price, sale ]
master_list = list(zip(labels, cols))
#print(master_list)
data = dict(master_list)
#data dictionary to dataframe
new_sales =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
class CleanFinData:
"""
Deal with outliers, N.A. values and any other cleaning that
has to be done. Get data from get_data, then feed into label_data.
"""
def __init__(self):
self.data = pd.DataFrame()
@staticmethod
def get_daily_vol(close, span=100):
"""
Daily volatility, reindexed to close.
:param close: close price pd Series
:param span: the span (in days) the exponentially weighted moving std looks back
:return: the ewm std of a stock's returns
"""
df0 = close.index.searchsorted(close.index-
|
pd.Timedelta(days=1)
|
pandas.Timedelta
|
import os
import ast
import pandas as pd
import numpy as np
from datetime import datetime
import time
import logging
level_config = {'debug': logging.DEBUG, 'info': logging.INFO}
FILE_SIZE = 500
BYTES_PER_PKT = 1500.0*8
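# Presumably MTU-sized packets: 1500 bytes expressed in bits (1500 * 8 = 12000).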
MILLISEC_IN_SEC = 1000.0
EXP_LEN = 1000 # millisecond
class Metric:
def __init__(self, name, mi=1., lbd=1., mi_s=1., log_level='debug'):
self.name = name
self.mi = mi
self.lbd = lbd
self.mi_s = mi_s
log_level = level_config[log_level.lower()]
logging.basicConfig(level=log_level)
self.logger = logging.getLogger(__name__)
def calc(self,listRate,listRebuffer):
pass
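# Note: calc is left abstract here. Given the mi, lbd and mi_s coefficients, it
# presumably implements the usual linear QoE objective for ABR evaluation,
# something like QoE = mi * bitrate - lbd * rebuffer_time - mi_s * switching
# penalty, but the exact formula is defined elsewhere and is not shown here.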
def tabulation(self, listQoE, scores=pd.DataFrame(), abrRule='abr Rule', prefix=''):
scores_tmp =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.neighbors import KNeighborsRegressor
# Loading in the data
bball_df = pd.read_csv('data/bball_imp.csv').dropna(subset=['salary'])
# Split the dataset
df_train, df_test = train_test_split(bball_df, test_size=0.2, random_state=7)
X_train = df_train[["weight", "height", "draft_year", "draft_round",
"draft_peak", "team", "position", "country"]]
X_test = df_test[["weight", "height", "draft_year", "draft_round",
"draft_peak", "team", "position", "country"]]
y_train = df_train['salary']
y_test = df_test['salary']
# Split the numeric and categorical features
numeric_features = [ "weight",
"height",
"draft_year",
"draft_round",
"draft_peak"]
categorical_features = ["team", "position", "country"]
# Build a numeric pipeline
numeric_pipe = make_pipeline(SimpleImputer(strategy="median"), StandardScaler())
# Build a categorical pipeline
categorical_pipe = make_pipeline(
    SimpleImputer(strategy="most_frequent"),
    OneHotEncoder(handle_unknown="ignore"))
# Build a column transformer
preprocessor = make_column_transformer(
    (numeric_pipe, numeric_features),
    (categorical_pipe, categorical_features))
# Build a main pipeline (KNeighborsRegressor is the only estimator imported above;
# the imputation strategies are one reasonable choice, not the only one)
main_pipe = make_pipeline(preprocessor, KNeighborsRegressor())
# Cross validate
with_categorical_scores = cross_validate(main_pipe, X_train, y_train, return_train_score=True)
|
pd.DataFrame(with_categorical_scores)
|
pandas.DataFrame
|
import collections.abc as cabc
from copy import copy
from typing import Union, Optional, Sequence, Any, Mapping, List, Tuple
import numpy as np
import pandas as pd
from anndata import AnnData
from cycler import Cycler
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from pandas.api.types import is_categorical_dtype
from matplotlib import pyplot as pl, colors
from matplotlib.cm import get_cmap
from matplotlib import rcParams
from matplotlib import patheffects
from matplotlib.colors import Colormap, Normalize
from functools import partial
from .. import _utils
from .._utils import (
_IGraphLayout,
_FontWeight,
_FontSize,
ColorLike,
VBound,
circles,
check_projection,
check_colornorm,
)
from .._docs import (
doc_adata_color_etc,
doc_edges_arrows,
doc_scatter_embedding,
doc_scatter_spatial,
doc_show_save_ax,
)
from ... import logging as logg
from ..._settings import settings
from ..._utils import sanitize_anndata, _doc_params, Empty, _empty
from ..._compat import Literal
@_doc_params(
adata_color_etc=doc_adata_color_etc,
edges_arrows=doc_edges_arrows,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def embedding(
adata: AnnData,
basis: str,
*,
color: Union[str, Sequence[str], None] = None,
gene_symbols: Optional[str] = None,
use_raw: Optional[bool] = None,
sort_order: bool = True,
edges: bool = False,
edges_width: float = 0.1,
edges_color: Union[str, Sequence[float], Sequence[str]] = 'grey',
neighbors_key: Optional[str] = None,
arrows: bool = False,
arrows_kwds: Optional[Mapping[str, Any]] = None,
groups: Optional[str] = None,
components: Union[str, Sequence[str]] = None,
layer: Optional[str] = None,
projection: Literal['2d', '3d'] = '2d',
scale_factor: Optional[float] = None,
color_map: Union[Colormap, str, None] = None,
cmap: Union[Colormap, str, None] = None,
palette: Union[str, Sequence[str], Cycler, None] = None,
na_color: ColorLike = "lightgray",
na_in_legend: bool = True,
size: Union[float, Sequence[float], None] = None,
frameon: Optional[bool] = None,
legend_fontsize: Union[int, float, _FontSize, None] = None,
legend_fontweight: Union[int, _FontWeight] = 'bold',
legend_loc: str = 'right margin',
legend_fontoutline: Optional[int] = None,
vmax: Union[VBound, Sequence[VBound], None] = None,
vmin: Union[VBound, Sequence[VBound], None] = None,
vcenter: Union[VBound, Sequence[VBound], None] = None,
norm: Union[Normalize, Sequence[Normalize], None] = None,
add_outline: Optional[bool] = False,
outline_width: Tuple[float, float] = (0.3, 0.05),
outline_color: Tuple[str, str] = ('black', 'white'),
ncols: int = 4,
hspace: float = 0.25,
wspace: Optional[float] = None,
title: Union[str, Sequence[str], None] = None,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
ax: Optional[Axes] = None,
return_fig: Optional[bool] = None,
**kwargs,
) -> Union[Figure, Axes, None]:
"""\
Scatter plot for user specified embedding basis (e.g. umap, pca, etc)
Parameters
----------
basis
Name of the `obsm` basis to use.
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
check_projection(projection)
sanitize_anndata(adata)
# Setting up color map for continuous values
if color_map is not None:
if cmap is not None:
raise ValueError("Cannot specify both `color_map` and `cmap`.")
else:
cmap = color_map
cmap = copy(get_cmap(cmap))
cmap.set_bad(na_color)
kwargs["cmap"] = cmap
# Prevents warnings during legend creation
na_color = colors.to_hex(na_color, keep_alpha=True)
if size is not None:
kwargs['s'] = size
if 'edgecolor' not in kwargs:
# by default turn off edge color. Otherwise, for
# very small sizes the edge will not reduce its size
# (https://github.com/theislab/scanpy/issues/293)
kwargs['edgecolor'] = 'none'
if groups:
if isinstance(groups, str):
groups = [groups]
args_3d = dict(projection='3d') if projection == '3d' else {}
# Deal with Raw
if use_raw is None:
# check if adata.raw is set
use_raw = layer is None and adata.raw is not None
if use_raw and layer is not None:
raise ValueError(
"Cannot use both a layer and the raw representation. Was passed:"
f"use_raw={use_raw}, layer={layer}."
)
if wspace is None:
# try to set a wspace that is not too large or too small given the
# current figure size
wspace = 0.75 / rcParams['figure.figsize'][0] + 0.02
if adata.raw is None and use_raw:
raise ValueError(
"`use_raw` is set to True but AnnData object does not have raw. "
"Please check."
)
# turn color into a python list
color = [color] if isinstance(color, str) or color is None else list(color)
if title is not None:
# turn title into a python list if not None
title = [title] if isinstance(title, str) else list(title)
# get the points position and the components list
# (only if components is not None)
data_points, components_list = _get_data_points(
adata, basis, projection, components, scale_factor
)
# Setup layout.
# Most of the code is for the case when multiple plots are required
# 'color' is a list of names that want to be plotted.
# Eg. ['Gene1', 'louvain', 'Gene2'].
# component_list is a list of components [[0,1], [1,2]]
if (
not isinstance(color, str)
and isinstance(color, cabc.Sequence)
and len(color) > 1
) or len(components_list) > 1:
if ax is not None:
raise ValueError(
"Cannot specify `ax` when plotting multiple panels "
"(each for a given value of 'color')."
)
if len(components_list) == 0:
components_list = [None]
# each plot needs to be its own panel
num_panels = len(color) * len(components_list)
fig, grid = _panel_grid(hspace, wspace, ncols, num_panels)
else:
if len(components_list) == 0:
components_list = [None]
grid = None
if ax is None:
fig = pl.figure()
ax = fig.add_subplot(111, **args_3d)
# turn vmax and vmin into a sequence
if isinstance(vmax, str) or not isinstance(vmax, cabc.Sequence):
vmax = [vmax]
if isinstance(vmin, str) or not isinstance(vmin, cabc.Sequence):
vmin = [vmin]
if isinstance(vcenter, str) or not isinstance(vcenter, cabc.Sequence):
vcenter = [vcenter]
if isinstance(norm, Normalize) or not isinstance(norm, cabc.Sequence):
norm = [norm]
if 's' in kwargs:
size = kwargs.pop('s')
if size is not None:
# check if size is any type of sequence, and if so
# set as ndarray
import pandas.core.series
if (
size is not None
and isinstance(size, (cabc.Sequence, pandas.core.series.Series, np.ndarray))
and len(size) == adata.shape[0]
):
size = np.array(size, dtype=float)
else:
size = 120000 / adata.shape[0]
# make the plots
axs = []
import itertools
idx_components = range(len(components_list))
# use itertools.product to make a plot for each color and for each component
# For example if color=[gene1, gene2] and components=['1,2, '2,3'].
# The plots are: [
# color=gene1, components=[1,2], color=gene1, components=[2,3],
# color=gene2, components = [1, 2], color=gene2, components=[2,3],
# ]
for count, (value_to_plot, component_idx) in enumerate(
itertools.product(color, idx_components)
):
color_source_vector = _get_color_source_vector(
adata,
value_to_plot,
layer=layer,
use_raw=use_raw,
gene_symbols=gene_symbols,
groups=groups,
)
color_vector, categorical = _color_vector(
adata,
value_to_plot,
color_source_vector,
palette=palette,
na_color=na_color,
)
# Order points
order = slice(None)
if sort_order is True and value_to_plot is not None and categorical is False:
# Higher values plotted on top, null values on bottom
order = np.argsort(-color_vector, kind="stable")[::-1]
elif sort_order and categorical:
# Null points go on bottom
order = np.argsort(~pd.isnull(color_source_vector), kind="stable")
# Set orders
if isinstance(size, np.ndarray):
size = np.array(size)[order]
color_source_vector = color_source_vector[order]
color_vector = color_vector[order]
_data_points = data_points[component_idx][order, :]
# if plotting multiple panels, get the ax from the grid spec
# else use the ax value (either user given or created previously)
if grid:
ax = pl.subplot(grid[count], **args_3d)
axs.append(ax)
if not (settings._frameon if frameon is None else frameon):
ax.axis('off')
if title is None:
if value_to_plot is not None:
ax.set_title(value_to_plot)
else:
ax.set_title('')
else:
try:
ax.set_title(title[count])
except IndexError:
logg.warning(
"The title list is shorter than the number of panels. "
"Using 'color' value instead for some plots."
)
ax.set_title(value_to_plot)
if not categorical:
vmin_float, vmax_float, vcenter_float, norm_obj = _get_vboundnorm(
vmin, vmax, vcenter, norm, count, color_vector
)
normalize = check_colornorm(
vmin_float,
vmax_float,
vcenter_float,
norm_obj,
)
else:
normalize = None
# make the scatter plot
if projection == '3d':
cax = ax.scatter(
_data_points[:, 0],
_data_points[:, 1],
_data_points[:, 2],
marker=".",
c=color_vector,
rasterized=settings._vector_friendly,
norm=normalize,
**kwargs,
)
else:
scatter = (
partial(ax.scatter, s=size, plotnonfinite=True)
if scale_factor is None
else partial(circles, s=size, ax=ax) # size in circles is radius
)
if add_outline:
# the default outline is a black edge followed by a
# thin white edge added around connected clusters.
# To add an outline
# three overlapping scatter plots are drawn:
# First black dots with slightly larger size,
# then, white dots a bit smaller, but still larger
# than the final dots. Then the final dots are drawn
# with some transparency.
bg_width, gap_width = outline_width
point = np.sqrt(size)
gap_size = (point + (point * gap_width) * 2) ** 2
bg_size = (np.sqrt(gap_size) + (point * bg_width) * 2) ** 2
# the default black and white colors can be changed using
# the outline_color parameter
bg_color, gap_color = outline_color
# remove edge from kwargs if present
# because edge needs to be set to None
kwargs['edgecolor'] = 'none'
# remove alpha for outline
alpha = kwargs.pop('alpha') if 'alpha' in kwargs else None
ax.scatter(
_data_points[:, 0],
_data_points[:, 1],
s=bg_size,
marker=".",
c=bg_color,
rasterized=settings._vector_friendly,
norm=normalize,
**kwargs,
)
ax.scatter(
_data_points[:, 0],
_data_points[:, 1],
s=gap_size,
marker=".",
c=gap_color,
rasterized=settings._vector_friendly,
norm=normalize,
**kwargs,
)
# if user did not set alpha, set alpha to 0.7
kwargs['alpha'] = 0.7 if alpha is None else alpha
cax = scatter(
_data_points[:, 0],
_data_points[:, 1],
marker=".",
c=color_vector,
rasterized=settings._vector_friendly,
norm=normalize,
**kwargs,
)
# remove y and x ticks
ax.set_yticks([])
ax.set_xticks([])
if projection == '3d':
ax.set_zticks([])
# set default axis_labels
name = _basis2name(basis)
if components is not None:
axis_labels = [name + str(x + 1) for x in components_list[component_idx]]
elif projection == '3d':
axis_labels = [name + str(x + 1) for x in range(3)]
else:
axis_labels = [name + str(x + 1) for x in range(2)]
ax.set_xlabel(axis_labels[0])
ax.set_ylabel(axis_labels[1])
if projection == '3d':
# shift the label closer to the axis
ax.set_zlabel(axis_labels[2], labelpad=-7)
ax.autoscale_view()
if edges:
_utils.plot_edges(ax, adata, basis, edges_width, edges_color, neighbors_key)
if arrows:
_utils.plot_arrows(ax, adata, basis, arrows_kwds)
if value_to_plot is None:
# if only dots were plotted without an associated value
# there is no need to plot a legend or a colorbar
continue
if legend_fontoutline is not None:
path_effect = [
patheffects.withStroke(linewidth=legend_fontoutline, foreground='w')
]
else:
path_effect = None
# Adding legends
if categorical:
_add_categorical_legend(
ax,
color_source_vector,
palette=_get_palette(adata, value_to_plot),
scatter_array=_data_points,
legend_loc=legend_loc,
legend_fontweight=legend_fontweight,
legend_fontsize=legend_fontsize,
legend_fontoutline=path_effect,
na_color=na_color,
na_in_legend=na_in_legend,
multi_panel=bool(grid),
)
else:
# TODO: na_in_legend should have some effect here
pl.colorbar(cax, ax=ax, pad=0.01, fraction=0.08, aspect=30)
if return_fig is True:
return fig
axs = axs if grid else ax
_utils.savefig_or_show(basis, show=show, save=save)
if show is False:
return axs
def _panel_grid(hspace, wspace, ncols, num_panels):
from matplotlib import gridspec
n_panels_x = min(ncols, num_panels)
n_panels_y = np.ceil(num_panels / n_panels_x).astype(int)
# each panel will have the size of rcParams['figure.figsize']
fig = pl.figure(
figsize=(
n_panels_x * rcParams['figure.figsize'][0] * (1 + wspace),
n_panels_y * rcParams['figure.figsize'][1],
),
)
left = 0.2 / n_panels_x
bottom = 0.13 / n_panels_y
gs = gridspec.GridSpec(
nrows=n_panels_y,
ncols=n_panels_x,
left=left,
right=1 - (n_panels_x - 1) * left - 0.01 / n_panels_x,
bottom=bottom,
top=1 - (n_panels_y - 1) * bottom - 0.1 / n_panels_y,
hspace=hspace,
wspace=wspace,
)
return fig, gs
def _get_vboundnorm(
vmin: Sequence[VBound],
vmax: Sequence[VBound],
vcenter: Sequence[VBound],
norm: Sequence[Normalize],
index: int,
color_vector: Sequence[float],
) -> Tuple[Union[float, None], Union[float, None]]:
"""
Evaluates the value of vmin, vmax and vcenter, which could be a
str, in which case it is interpreted as a percentile and should
be specified in the form 'pN', where N is the percentile.
E.g. for a percentile of 85 the format would be 'p85'.
Float percentiles are also accepted, e.g. 'p99.9'.
Alternatively, vmin/vmax can be a function that is applied to
the list of color values (`color_vector`). E.g.
def my_vmax(color_vector): return np.percentile(color_vector, q=80)
Parameters
----------
index
This index of the plot
color_vector
List or values for the plot
Returns
-------
(vmin, vmax, vcenter, norm) containing None or float values for
vmin, vmax, vcenter and matplotlib.colors.Normalize or None for norm.
"""
out = []
for v_name, v in [('vmin', vmin), ('vmax', vmax), ('vcenter', vcenter)]:
if len(v) == 1:
# this case usually happens when the user sets eg vmax=0.9, which
# is internally converted into list of len=1, but is expected that this
# value applies to all plots.
v_value = v[0]
else:
try:
v_value = v[index]
except IndexError:
logg.error(
f"The parameter {v_name} is not valid. If setting multiple {v_name} values,"
f"check that the length of the {v_name} list is equal to the number "
"of plots. "
)
v_value = None
if v_value is not None:
if isinstance(v_value, str) and v_value.startswith('p'):
try:
float(v_value[1:])
except ValueError:
logg.error(
f"The parameter {v_name}={v_value} for plot number {index + 1} is not valid. "
f"Please check the correct format for percentiles."
)
# interpret value of vmin/vmax as quantile with the following syntax 'p99.9'
v_value = np.nanpercentile(color_vector, q=float(v_value[1:]))
elif callable(v_value):
# interpret vmin/vmax as function
v_value = v_value(color_vector)
if not isinstance(v_value, float):
logg.error(
f"The return of the function given for {v_name} is not valid. "
"Please check that the function returns a number."
)
v_value = None
else:
try:
float(v_value)
except ValueError:
logg.error(
f"The given {v_name}={v_value} for plot number {index + 1} is not valid. "
f"Please check that the value given is a valid number, a string "
f"starting with 'p' for percentiles or a valid function."
)
v_value = None
out.append(v_value)
out.append(norm[0] if len(norm) == 1 else norm[index])
return tuple(out)
def _wraps_plot_scatter(wrapper):
import inspect
params = inspect.signature(embedding).parameters.copy()
wrapper_sig = inspect.signature(wrapper)
wrapper_params = wrapper_sig.parameters.copy()
params.pop("basis")
params.pop("kwargs")
wrapper_params.pop("adata")
params.update(wrapper_params)
annotations = {
k: v.annotation
for k, v in params.items()
if v.annotation != inspect.Parameter.empty
}
if wrapper_sig.return_annotation is not inspect.Signature.empty:
annotations["return"] = wrapper_sig.return_annotation
wrapper.__signature__ = inspect.Signature(
list(params.values()), return_annotation=wrapper_sig.return_annotation
)
wrapper.__annotations__ = annotations
return wrapper
# API
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
edges_arrows=doc_edges_arrows,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def umap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in UMAP basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return embedding(adata, 'umap', **kwargs)
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
edges_arrows=doc_edges_arrows,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def tsne(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in tSNE basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return embedding(adata, 'tsne', **kwargs)
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def diffmap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in Diffusion Map basis.
Parameters
----------
{adata_color_etc}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return embedding(adata, 'diffmap', **kwargs)
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
edges_arrows=doc_edges_arrows,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def draw_graph(
adata: AnnData, *, layout: Optional[_IGraphLayout] = None, **kwargs
) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in graph-drawing basis.
Parameters
----------
{adata_color_etc}
layout
One of the :func:`~scanpy.tl.draw_graph` layouts.
By default, the last computed layout is used.
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
if layout is None:
layout = str(adata.uns['draw_graph']['params']['layout'])
basis = 'draw_graph_' + layout
if 'X_' + basis not in adata.obsm_keys():
raise ValueError(
'Did not find {} in adata.obs. Did you compute layout {}?'.format(
'draw_graph_' + layout, layout
)
)
return embedding(adata, basis, **kwargs)
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def pca(
adata,
*,
annotate_var_explained: bool = False,
show: Optional[bool] = None,
return_fig: Optional[bool] = None,
save: Union[bool, str, None] = None,
**kwargs,
) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in PCA coordinates.
Use the parameter `annotate_var_explained` to annotate the explained variance.
Parameters
----------
{adata_color_etc}
annotate_var_explained
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
if not annotate_var_explained:
return embedding(
adata, 'pca', show=show, return_fig=return_fig, save=save, **kwargs
)
else:
if 'pca' not in adata.obsm.keys() and 'X_pca' not in adata.obsm.keys():
raise KeyError(
f"Could not find entry in `obsm` for 'pca'.\n"
f"Available keys are: {list(adata.obsm.keys())}."
)
label_dict = {
'PC{}'.format(i + 1): 'PC{} ({}%)'.format(i + 1, round(v * 100, 2))
for i, v in enumerate(adata.uns['pca']['variance_ratio'])
}
if return_fig is True:
# edit axis labels in returned figure
fig = embedding(adata, 'pca', return_fig=return_fig, **kwargs)
for ax in fig.axes:
ax.set_xlabel(label_dict[ax.xaxis.get_label().get_text()])
ax.set_ylabel(label_dict[ax.yaxis.get_label().get_text()])
return fig
else:
# get the axs, edit the labels and apply show and save from user
axs = embedding(adata, 'pca', show=False, save=False, **kwargs)
if isinstance(axs, list):
for ax in axs:
ax.set_xlabel(label_dict[ax.xaxis.get_label().get_text()])
ax.set_ylabel(label_dict[ax.yaxis.get_label().get_text()])
else:
axs.set_xlabel(label_dict[axs.xaxis.get_label().get_text()])
axs.set_ylabel(label_dict[axs.yaxis.get_label().get_text()])
_utils.savefig_or_show('pca', show=show, save=save)
if show is False:
return axs
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
scatter_spatial=doc_scatter_spatial,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def spatial(
adata,
*,
basis: str = "spatial",
img: Union[np.ndarray, None] = None,
img_key: Union[str, None, Empty] = _empty,
library_id: Union[str, Empty] = _empty,
crop_coord: Tuple[int, int, int, int] = None,
alpha_img: float = 1.0,
bw: Optional[bool] = False,
size: float = 1.0,
scale_factor: Optional[float] = None,
spot_size: Optional[float] = None,
na_color: Optional[ColorLike] = None,
show: Optional[bool] = None,
return_fig: Optional[bool] = None,
save: Union[bool, str, None] = None,
**kwargs,
) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in spatial coordinates.
This function allows overlaying data on top of images.
    Use the parameter `img_key` to see the image in the background
    and the parameter `library_id` to select the image.
    By default, `'hires'` and `'lowres'` are attempted.
    Use `crop_coord`, `alpha_img`, and `bw` to control how it is displayed.
    Use `size` to scale the size of the Visium spots plotted on top.
    As this function is designed for imaging data, there are two key assumptions
    about how coordinates are handled:
    1. The origin (e.g. `(0, 0)`) is at the top left – as is common convention
       with image data.
    2. Coordinates are in the pixel space of the source image, so an equal
       aspect ratio is assumed.
    If your anndata object has a `"spatial"` entry in `.uns`, the `img_key`
    and `library_id` parameters are used to find values for the `img`,
    `scale_factor`, and `spot_size` arguments. Alternatively, these values
    can be passed directly.
Parameters
----------
{adata_color_etc}
{scatter_spatial}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
Examples
--------
This function behaves very similarly to other embedding plots like
:func:`~scanpy.pl.umap`
    >>> import scanpy as sc
    >>> adata = sc.datasets.visium_sge("Targeted_Visium_Human_Glioblastoma_Pan_Cancer")
>>> sc.pp.calculate_qc_metrics(adata, inplace=True)
>>> sc.pl.spatial(adata, color="log1p_n_genes_by_counts")
See Also
--------
:func:`scanpy.datasets.visium_sge`
Example visium data.
:tutorial:`spatial/basic-analysis`
Tutorial on spatial analysis.
"""
# get default image params if available
library_id, spatial_data = _check_spatial_data(adata.uns, library_id)
img, img_key = _check_img(spatial_data, img, img_key, bw=bw)
spot_size = _check_spot_size(spatial_data, spot_size)
scale_factor = _check_scale_factor(
spatial_data, img_key=img_key, scale_factor=scale_factor
)
crop_coord = _check_crop_coord(crop_coord, scale_factor)
na_color = _check_na_color(na_color, img=img)
if bw:
cmap_img = "gray"
else:
cmap_img = None
circle_radius = size * scale_factor * spot_size * 0.5
axs = embedding(
adata,
basis=basis,
scale_factor=scale_factor,
size=circle_radius,
na_color=na_color,
show=False,
save=False,
**kwargs,
)
if not isinstance(axs, list):
axs = [axs]
for ax in axs:
cur_coords = np.concatenate([ax.get_xlim(), ax.get_ylim()])
if img is not None:
ax.imshow(img, cmap=cmap_img, alpha=alpha_img)
else:
ax.set_aspect("equal")
ax.invert_yaxis()
if crop_coord is not None:
ax.set_xlim(crop_coord[0], crop_coord[1])
ax.set_ylim(crop_coord[3], crop_coord[2])
else:
ax.set_xlim(cur_coords[0], cur_coords[1])
ax.set_ylim(cur_coords[3], cur_coords[2])
_utils.savefig_or_show('show', show=show, save=save)
if show is False or return_fig is True:
return axs
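# Usage sketch for the function above (illustrative values; `adata` is assumed
# to come from `sc.read_visium` and 'total_counts' is an example key): the
# image, scale factor and spot size are picked up from `adata.uns['spatial']`,
# and `crop_coord` is given in pixel coordinates of the source image as
# (x_min, x_max, y_min, y_max), matching the axis limits set above.
#
#     import scanpy as sc
#     sc.pl.spatial(adata, img_key='hires', color='total_counts', size=1.3)
#     sc.pl.spatial(adata, img_key='hires', color='total_counts',
#                   crop_coord=(1000, 4000, 1500, 4500))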
# Helpers
def _get_data_points(
adata, basis, projection, components, scale_factor
) -> Tuple[List[np.ndarray], List[Tuple[int, int]]]:
"""
Returns the data points corresponding to the selected basis, projection and/or components.
    Because multiple components can be given (e.g. components=['1,2', '2,3']), the
    returned data are lists containing each of the components. When only one component is plotted
    the list length is 1.
Returns
-------
data_points
Each entry is a numpy array containing the data points
components
        The cleaned list of components. E.g. [(0, 1)] or [(0, 1), (1, 2)]
        for components=[1, 2] and components=['1,2', '2,3'] respectively.
"""
if basis in adata.obsm.keys():
basis_key = basis
elif f"X_{basis}" in adata.obsm.keys():
basis_key = f"X_{basis}"
else:
raise KeyError(
f"Could not find entry in `obsm` for '{basis}'.\n"
f"Available keys are: {list(adata.obsm.keys())}."
)
n_dims = 2
if projection == '3d':
# check if the data has a third dimension
if adata.obsm[basis_key].shape[1] == 2:
if settings._low_resolution_warning:
logg.warning(
                    'Selected projection is "3d" but only two dimensions '
'are available. Only these two dimensions will be plotted'
)
else:
n_dims = 3
if components == 'all':
from itertools import combinations
r_value = 3 if projection == '3d' else 2
_components_list = np.arange(adata.obsm[basis_key].shape[1]) + 1
components = [
",".join(map(str, x)) for x in combinations(_components_list, r=r_value)
]
components_list = []
offset = 0
if basis == 'diffmap':
offset = 1
if components is not None:
# components have different formats, either a list with integers, a string
# or a list of strings.
if isinstance(components, str):
# eg: components='1,2'
components_list.append(
tuple(int(x.strip()) - 1 + offset for x in components.split(','))
)
elif isinstance(components, cabc.Sequence):
if isinstance(components[0], int):
# components=[1,2]
components_list.append(tuple(int(x) - 1 + offset for x in components))
else:
# in this case, the components are str
                # e.g.: components=['1,2'] or components=['1,2', '2,3']
# More than one component can be given and is stored
# as a new item of components_list
for comp in components:
components_list.append(
tuple(int(x.strip()) - 1 + offset for x in comp.split(','))
)
else:
            raise ValueError(
                "Given components: '{}' are not valid. Please check. "
                "A valid example is `components='2,3'`".format(components)
            )
# check if the components are present in the data
try:
data_points = []
for comp in components_list:
data_points.append(adata.obsm[basis_key][:, comp])
except Exception: # TODO catch the correct exception
            raise ValueError(
                "Given components: '{}' are not valid. Please check. "
                "A valid example is `components='2,3'`".format(components)
            )
if basis == 'diffmap':
# remove the offset added in the case of diffmap, such that
# plot_scatter can print the labels correctly.
components_list = [
tuple(number - 1 for number in comp) for comp in components_list
]
else:
data_points = [np.array(adata.obsm[basis_key])[:, offset : offset + n_dims]]
components_list = []
if scale_factor is not None: # if basis need scale for img background
data_points[0] = np.multiply(data_points[0], scale_factor)
return data_points, components_list
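# Illustration of the components normalization performed above (values are
# examples): user-facing 1-based components are converted to 0-based column
# tuples, with an extra offset of 1 applied and later removed for 'diffmap'.
#
#     components='1,2'          -> components_list == [(0, 1)]
#     components=[1, 2]         -> components_list == [(0, 1)]
#     components=['1,2', '2,3'] -> components_list == [(0, 1), (1, 2)]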
def _add_categorical_legend(
ax,
color_source_vector,
palette: dict,
legend_loc: str,
legend_fontweight,
legend_fontsize,
legend_fontoutline,
multi_panel,
na_color,
na_in_legend: bool,
scatter_array=None,
):
"""Add a legend to the passed Axes."""
if na_in_legend and pd.isnull(color_source_vector).any():
if "NA" in color_source_vector:
raise NotImplementedError(
"No fallback for null labels has been defined if NA already in categories."
)
color_source_vector = color_source_vector.add_categories("NA").fillna("NA")
palette = palette.copy()
palette["NA"] = na_color
cats = color_source_vector.categories
if multi_panel is True:
# Shrink current axis by 10% to fit legend and match
# size of plots that are not categorical
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.91, box.height])
if legend_loc == 'right margin':
for label in cats:
ax.scatter([], [], c=palette[label], label=label)
ax.legend(
frameon=False,
loc='center left',
bbox_to_anchor=(1, 0.5),
ncol=(1 if len(cats) <= 14 else 2 if len(cats) <= 30 else 3),
fontsize=legend_fontsize,
)
elif legend_loc == 'on data':
# identify centroids to put labels
all_pos = (
pd.DataFrame(scatter_array, columns=["x", "y"])
.groupby(color_source_vector, observed=True)
.median()
)
for label, x_pos, y_pos in all_pos.itertuples():
ax.text(
x_pos,
y_pos,
label,
weight=legend_fontweight,
verticalalignment='center',
horizontalalignment='center',
fontsize=legend_fontsize,
path_effects=legend_fontoutline,
)
        # TODO: refactor this away;
        # this is temporary storage for access by other tools
_utils._tmp_cluster_pos = all_pos.values
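# Toy illustration (not library code) of the centroid step used above for
# legend_loc='on data': per-category medians of the scatter coordinates give
# the label positions.
#
#     import numpy as np, pandas as pd
#     coords = np.array([[0, 0], [1, 1], [10, 10], [12, 8]])
#     labels = pd.Categorical(["a", "a", "b", "b"])
#     pd.DataFrame(coords, columns=["x", "y"]).groupby(labels, observed=True).median()
#     # "a" is labelled at (0.5, 0.5) and "b" at (11.0, 9.0)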
def _get_color_source_vector(
adata, value_to_plot, use_raw=False, gene_symbols=None, layer=None, groups=None
):
"""
Get array from adata that colors will be based on.
"""
if value_to_plot is None:
# Points will be plotted with `na_color`. Ideally this would work
# with the "bad color" in a color map but that throws a warning. Instead
# _color_vector handles this.
# https://github.com/matplotlib/matplotlib/issues/18294
return np.broadcast_to(np.nan, adata.n_obs)
if (
gene_symbols is not None
and value_to_plot not in adata.obs.columns
and value_to_plot not in adata.var_names
):
# We should probably just make an index for this, and share it over runs
value_to_plot = adata.var.index[adata.var[gene_symbols] == value_to_plot][
0
] # TODO: Throw helpful error if this doesn't work
if use_raw and value_to_plot not in adata.obs.columns:
values = adata.raw.obs_vector(value_to_plot)
else:
values = adata.obs_vector(value_to_plot, layer=layer)
    if groups and is_categorical_dtype(values):
        # keep only the requested groups; other categories are masked and are
        # drawn with the NA color downstream
        values = values.replace(values.categories.difference(groups), np.nan)
    return values
"""
Copyright (c) 2020, <NAME> <NAME>
All rights reserved.
This is an information tool to retrieve official business financials (income statements, balance sheets, and cash flow statements) for a specified range of times. The code aims to be as vanilla as possible by minimizing the dependencies and packages used to construct functions. This code can be used immediately off the shelf and assumes no more than the following packages to be installed. As a reminder, please ensure that your directory has enough space, ideally at least 100 MB, for newly serialized reports to reside on the disk until you decide to clear them.
"""
# Import libraries
import re
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from datetime import datetime
from selenium import webdriver
import os
import pickle
class Business:
# Define a default constructor for the Business object
    def __init__(self, foreign, symbol, report_type, start_period, end_period):
self.foreign=foreign
self.symbol=symbol
self.report_type=report_type
self.start_period=start_period
self.end_period=end_period
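    # Usage sketch (illustrative ticker and date range, not taken from this file):
    #     apple = Business(foreign=False, symbol='AAPL', report_type='annual',
    #                      start_period=20150101, end_period=20201231)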
#-------------Retrieving Annual/Quarter Reports----------
# Define a function to store the url(s) to a company's annual or quarter report(s)
def ghost_report_url(self):
############## Check validity of inputs #############
## Error Message if the foreign argument is not logical
if (type(self.foreign)!=bool):
raise TypeError("Invalid foreign type: foreign argument must be logical- True or False")
## Error message if the inputted ticker symbol is not a string
if(type(self.symbol)!=str):
raise TypeError("Invalid ticker symbol type: symbol argument must be a string")
        ## Error message if the inputted report type is neither 'annual' nor 'quarter'
if(self.report_type!='annual' and self.report_type!='quarter'):
raise TypeError("Invalid report type: only 'annual' or 'quarter' report type is allowed")
## Error message if the specified start period or(and) end period is(are) not valid
        if (len(str(self.start_period)) != 8) or (len(str(self.end_period)) != 8):
raise ValueError("Invalid start period or(and) end period(s): start_period and end_period arguments must be in the form yyyymmdd")
## Error message to warn that foreign quarterly reports are not available on the SEC Edgar database
if(self.foreign==True and self.report_type=='quarter'):
raise ValueError("Foreign quarterly report(s) not available: try 'annual' report instead")
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
################# Retrieving Annual Report(s) (10-K or 20-F) ################
if(self.report_type=='annual'):
# Get the url to the company's historic 10-K (including 10-K/A) or 20-F (including 20-F/A) filings(s)
historical_filings_url=r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=10-k&dateb=&owner=exclude&count=100" if self.foreign==False else r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=20-f&dateb=&owner=exclude&count=100"
# Get table containing descriptions of the company's 10-K(include 10-K/A and others) or 20-F(include 20F/A and others) filings(s)
filings_description_table=pd.read_html(str(BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("table",{"class":"tableFile2"})))[0]
## Stop and return an error message if the company has no filing type of 10-K or 20-F, given the company symbol and foreign logic
if len(filings_description_table[(filings_description_table["Filings"]=="10-K")|(filings_description_table["Filings"]=="20-F")])==0:
raise NameError("Invalid company symbol or(and) foreign logical")
# Get the company's CIK (Central Index Key) number
cik_number=re.search(r"(\d{10})",BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("span",{"class":"companyName"}).text)[0]
            # Get a list of accession numbers of the historic 10-K or 20-F filing(s); raw_accession_numbers because accession numbers are separated by dashes
raw_accession_numbers=filings_description_table[(filings_description_table["Filings"]=="10-K")| (filings_description_table["Filings"]=="20-F")].Description.str.extract(r"(\d{10}\-\d{2}\-\d{6})",expand=False)
# Get a list of url(s) to a company's historic 10-K or 20-F report(s) details
filing_details_url=r"https://www.sec.gov/Archives/edgar/data/"+cik_number+r"/"+raw_accession_numbers+r"-index.html"
filing_details_url=filing_details_url.to_list()
# Get a list of url(s) to a company's 10-K or 20-F report(s) documentations
document_details_url=r"https://www.sec.gov/cgi-bin/viewer?action=view&cik="+cik_number+"&accession_number="+raw_accession_numbers+"&xbrl_type=v"
document_details_url=document_details_url.to_list()
# Get report period(s), that is the 10-K or 20-F report(s) as of this(these) date(s)
report_periods=[datetime.strptime(BeautifulSoup(requests.get(url).content,"html.parser").find("div",text=re.compile("Period of Report")).find_next("div").text,"%Y-%m-%d").date() for url in filing_details_url]
# Get specified filing details url(s)
filing_details_url=[filing_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get specified document details url(s)
document_details_url=[document_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get download url(s) to the company's 10-K or 20F extracts
annual_download_url=[]
for url in document_details_url:
soup=BeautifulSoup(requests.get(url).content,"html.parser").find('a', text = re.compile('View Excel Document'), attrs = {'class' : 'xbrlviewer'})
if soup is not None:
annual_download_url.append(r"https://www.sec.gov"+soup['href'])
else:
annual_download_url.append(None)
# Get specified report period(s)
report_periods=[report_periods[rp] for rp in range(len(report_periods)) if report_periods[rp]>start_period and report_periods[rp]<=end_period]
# Get html table(s) of the document format files
tableFile=[BeautifulSoup(requests.get(url).content,"html.parser").find("table", { "summary" : "Document Format Files"}) for url in filing_details_url]
# Get url(s) to the annual report html
annual_report_url=[]
for tab in range(len(tableFile)):
if tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip()!='':
if ".htm" in tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip():
annual_report_url.append("https://www.sec.gov"+tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a")["href"].replace("/ix?doc=",""))
else:
annual_report_url.append("annual report is not in HTML format")
else:
annual_report_url.append("annual report not available")
# Combine the company's report period(s), and annual report url(s) into a data frame
annual_report_df=pd.DataFrame({'report_periods':report_periods,'annual_report_url':annual_report_url,'annual_download_url':annual_download_url},index=[self.symbol]*len(report_periods))
            # Return the data frame constructed above if it is not empty
if not annual_report_df.empty:
return annual_report_df
else:
return "No annual report filing(s) for "+ self.symbol + " between "+ start_period.strftime("%Y-%m-%d")+" and "+end_period.strftime("%Y-%m-%d")
################# Retrieving Quarter Report(s) (10-Q) #########################
if(self.report_type=='quarter'):
# Get the url to the company's historic 10-Q
historical_filings_url=r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=10-q&dateb=&owner=exclude&count=100"
# Get table containing descriptions of the company's 10-Q(include 10-Q/A and others) filings(s)
filings_description_table=pd.read_html(str(BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("table",{"class":"tableFile2"})))[0]
## Stop and return an error message if the company has no filing type of 10-Q, given the company symbol and foreign logic
if len(filings_description_table[filings_description_table["Filings"]=="10-Q"])==0:
raise NameError("Invalid company symbol or(and) foreign logical")
# Get the company's CIK (Central Index Key) number
cik_number=re.search(r"(\d{10})",BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("span",{"class":"companyName"}).text)[0]
            # Get a list of accession numbers of the historic 10-Q filing(s); raw_accession_numbers because accession numbers are separated by dashes
raw_accession_numbers=filings_description_table[filings_description_table["Filings"]=="10-Q"].Description.str.extract(r"(\d{10}\-\d{2}\-\d{6})",expand=False)
# Get a list of url(s) to a company's historic 10-Q report(s) details
filing_details_url=r"https://www.sec.gov/Archives/edgar/data/"+cik_number+r"/"+raw_accession_numbers+r"-index.html"
filing_details_url=filing_details_url.to_list()
# Get a list of url(s) to a company's 10-Q report(s) documentations
document_details_url=r"https://www.sec.gov/cgi-bin/viewer?action=view&cik="+cik_number+"&accession_number="+raw_accession_numbers+"&xbrl_type=v"
document_details_url=document_details_url.to_list()
## At this moment, documents before 2009 are not available. Documents of this type are not normally needed anyway
# Get report period(s), that is the 10-Q report(s) as of this(these) date(s)
report_periods=[datetime.strptime(BeautifulSoup(requests.get(url).content,"html.parser").find("div",text=re.compile("Period of Report")).find_next("div").text,"%Y-%m-%d").date() for url in filing_details_url]
# Get specified filing details url(s)
filing_details_url=[filing_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get specified document details url(s)
document_details_url=[document_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get download url(s) to the company's 10-Q extracts
quarter_download_url=[]
for url in document_details_url:
soup=BeautifulSoup(requests.get(url).content,"html.parser").find('a', text = re.compile('View Excel Document'), attrs = {'class' : 'xbrlviewer'})
if soup is not None:
quarter_download_url.append(r"https://www.sec.gov"+soup['href'])
else:
quarter_download_url.append(None)
# Get specified report period(s)
report_periods=[report_periods[rp] for rp in range(len(report_periods)) if report_periods[rp]>start_period and report_periods[rp]<=end_period]
# Get html table(s) of the document format files
tableFile=[BeautifulSoup(requests.get(url).content,"html.parser").find("table", { "summary" : "Document Format Files"}) for url in filing_details_url]
# Get url(s) to the quarterly report html
quarter_report_url=[]
for tab in range(len(tableFile)):
if tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip()!='':
if ".htm" in tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip():
quarter_report_url.append("https://www.sec.gov"+tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a")["href"].replace("/ix?doc=",""))
else:
quarter_report_url.append("quarterly report is not in HTML format")
else:
quarter_report_url.append("quarterly report not available")
# Combine the company's report period(s), and quarterly report url(s) into a data frame
quarter_report_df=pd.DataFrame({'report_periods':report_periods,'quarter_report_url':quarter_report_url,'quarter_download_url':quarter_download_url},index=[self.symbol]*len(report_periods))
            # Return the data frame constructed above if it is not empty
if not quarter_report_df.empty:
return quarter_report_df
else:
return "No quarter report filing(s) for "+ self.symbol + " between "+ start_period.strftime("%Y-%m-%d")+" and "+end_period.strftime("%Y-%m-%d")
#------------------------ Best-scrolled to the most relevant financial exhibit------------------------
# A function to exhibit financial statements
def financial_statements_exhibit(self):
## Errors checked in the ghost_report_url()
# Target annual financial statements of U.S. businesses
# Prioritize in the order of 'Consolidated Statements of Cash Flows', 'Consolidated Income Statements', 'Consolidated Statements of Operations', 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position', 'Financial Statements and Supplementary Data', 'Selected Financial Data'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
            # Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled financial exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('Financial Statements and Supplementary Data').click()
except:
try:
driver.find_element_by_partial_link_text('Selected Financial Data').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual financial statements require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled financial exhibit
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual financial statements of foreign businesses
# Prioritize in the order of 'Consolidated Statements of Cash Flows', 'Consolidated Income Statements', 'Consolidated Statements of Operations', 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position', 'FINANCIAL STATEMENTS', 'Financial Statements', 'Selected Financial Data'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
            # Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the most relevant financial exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('FINANCIAL STATEMENTS').click()
except:
try:
                                            # Since the query is case sensitive, also search for the title-case variant
driver.find_element_by_partial_link_text('Financial Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Selected Financial Data').click()
except:
try:
driver.find_element_by_partial_link_text('KEY INFORMATION').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual financial statements require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled financial exhibit
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target quarter financial statements of U.S. businesses
# Prioritize in the order of 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position','Consolidated Statements of Cash Flows','Consolidated Income Statements' 'Consolidated Statements of Operations', 'FINANCIAL STATEMENTS', 'Financial Statements'
if(self.foreign==False and self.report_type=='quarter'):
# Import quarter_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
            # Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up best-scrolled financial exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('FINANCIAL STATEMENTS').click()
except:
try:
driver.find_element_by_partial_link_text('Financial Statements').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter financial statements require manual browsing.' )
pass
# Open new tab after pulling up the best-scrolled balance sheet section
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
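    # Usage sketch: opens each best-scrolled exhibit in a new Chrome tab. Assumes
    # 'chromedriver.exe' sits in the working directory and a Selenium 3.x release
    # that still provides find_element_by_partial_link_text.
    #     Business(False, 'MSFT', 'annual', 20170101, 20201231).financial_statements_exhibit()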
#------------ Best-scrolled to the most relevant risk factor exhibit------------
# A function to exhibit risk factors
def risk_factors_exhibit(self, risk_type):
## Previous errors checked in the ghost_report_url()
        ## Error message if the inputted risk type is neither 'enterprise' nor 'market'
if(risk_type!='enterprise' and risk_type!='market'):
raise TypeError("Invalid risk type: only 'enterprise' or 'market' risk type is allowed")
########################### Enterprise Risk Exhibit ##################################
if(risk_type=='enterprise'):
# Target annual and quarter enterprise risk factors of U.S. businesses
# Prioritize in the order of 'Risk Factors','RISK FACTORS'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
                # Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual enterprise risk factors require manual browsing.' )
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
elif (self.foreign==False and self.report_type=='quarter'):
                # Import quarter_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
                # Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter enterprise risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual enterprise risk factors of foreign businesses
# Prioritize in the order of 'Risk Factors', 'RISK FACTORS', 'KEY INFORMATION', 'Key Information'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
                # Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
try:
driver.find_element_by_partial_link_text('KEY INFORMATION').click()
except:
try:
driver.find_element_by_partial_link_text('Key Information').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual enterprise risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
########################### Market Risk Exhibit #############################
elif(risk_type=='market'):
# Target annual and quarter market risk factors of U.S. businesses
# Prioritize in the order of 'Quantitative and Qualitative Disclosures About Market Risk', 'QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
                # Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
elif (self.foreign==False and self.report_type=='quarter'):
                # Import quarter_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
                # Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual market risk factors of foreign businesses
# Prioritize in the order of 'Quantitative and Qualitative Disclosures About Market Risk','QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
                # Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
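    # Usage sketch (illustrative arguments): browse the market-risk sections of
    # recent quarterly filings in the same chromedriver-backed way as above.
    #     Business(False, 'MSFT', 'quarter', 20190101, 20201231).risk_factors_exhibit('market')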
#----------------------------- Curate Financial Statements -----------------------------------------
    # A function to curate income statements, balance sheets, and cash flow statements for U.S. and foreign businesses
def curate_financial_statements(self,statement_type):
## Error message if inputted statement type is not available
if(statement_type!='income' and statement_type!='balance' and statement_type!='cashflow'):
raise TypeError("Statement type not available: only 'income', 'balance', or 'cashflow' statement type is allowed")
        # Probable names for statement selection - may have to update identifiers as different companies use different statement names
income_terms=['Consolidated Income Statement', 'Consolidated Statements of Income', 'Consolidated Statements of Earnings', 'Consolidated Statements of Operations','Consolidated Statements of Profit or Loss','Profit and Loss Statement','P&L Statement','P/L Statement','Consolidated Income Statement','Consolidated Statement of Income', 'Consolidated Statement of Earnings','Consolidated Statement of Operations','Consolidated Statement of Profit or Loss','Consolidated Profit and Loss Statement','Consolidated P&L Statement','Consolidated P/L Statement','Statement of Consolidated Operations','Statements of Consolidated Operations','Statement of Combined Operation','Statements of Combined Operation']
balance_terms=['Consolidated Balance Sheets', 'Consolidated Balance Sheet','Consolidated Statements of Financial Position', 'Consolidated Statements of Financial Condition','Consolidated Statement of Financial Positions','Consolidated Statement of Financial Conditions', 'Statement of Consolidated Financial Position','Statements of Consolidated Financial Position', 'Statement of Consolidated Financial Condition', 'Statements of Consolidated Financial Condition','Combined Balance Sheet']
cashflow_terms=['Consolidated Statements of Cash Flows','Consolidated Statement of Cash Flows','Cash Flow Statement','Consolidated Cash Flow Statement', 'Statement of Consolidated Cash Flows','Statements of Consolidated Cash Flows','Statement of Combined Cash Flow','Statements of Combined Cash Flow']
        # Set root directory for file access
root_path=os.getcwd()
########### Extract Annual and Quarter Financial Statements (U.S. and foreign businesses)#################
# Retrieve periods and url(s) from the url table called by ghost_report_url()
report_table=self.ghost_report_url()
report_periods=report_table.report_periods.to_list()
if(self.report_type=='annual'):
download_url_container=report_table.annual_download_url.to_list() # container to store the download urls of annual statements
elif(self.report_type=='quarter'):
download_url_container=report_table.quarter_download_url.to_list() # container to store the download urls of quarter statements
# Designate a directory to store downloaded statements (begin statement piling)
statement_pile_path=os.path.join(root_path,'statement_pile')
company_pile_path=os.path.join(statement_pile_path,self.symbol)
try:
os.mkdir(statement_pile_path) # Create the statement_pile_path path
os.mkdir(company_pile_path) # Create the company_pile_path path
os.chdir(company_pile_path) # Tab into the company_pile_path path
except:
try:
os.mkdir(company_pile_path) # Create the company_pile_path path
os.chdir(company_pile_path) # Tab into the company_pile_path path
except:
os.chdir(company_pile_path)
        # Download accessible statements into the statement_pile path
# Construct a data frame to store the specified statement type
period_container=[] # container to store statement periods
statement_container=[] # container to store statement table
for url_index in range(len(download_url_container)):
statement_period=report_periods[url_index].strftime("%Y-%m-%d")
if(download_url_container[url_index] is not None and download_url_container[url_index][download_url_container[url_index].rfind('.')+1:len(download_url_container[url_index])]!='xls'):
statement_file=requests.get(download_url_container[url_index])
file_name=self.symbol+statement_period+self.report_type+'.xlsx'
with open(file_name, 'wb+') as fs:
fs.write(statement_file.content) # populating statement contents
dfs=pd.ExcelFile(fs)
sheet_headers=list(map(lambda x: x.lower().replace(' ','').replace('_','').replace('-','').replace(',','').replace("'","").replace('&','').replace('/',''), [dfs.parse(sn).columns[0] for sn in dfs.sheet_names]))
############################ Income Statements ###################################
if (statement_type=='income'):
income_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''),income_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in income_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify income statement and store in dataframe form
income_statement=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store income statement into the statement container
statement_container.append(income_statement)
# Store income statement period into the period container
period_container.append(statement_period)
# Serialize the income statement dataframe into '.pickle'- to be accessed faster next time
income_statement.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store income statement as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store income statement period into the period container
period_container.append(statement_period)
# Message to warn that income statement may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' income statement not identified or not available: update income statement identifiers or pass')
############################ Balance Sheets ###################################
if (statement_type=='balance'):
balance_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''), balance_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in balance_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify balance sheet and store in dataframe form
balance_sheet=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
                            # Store balance sheet into the statement container
statement_container.append(balance_sheet)
# Store balance sheet period into the period container
period_container.append(statement_period)
# Serialize the balance sheet dataframe into '.pickle'- to be accessed faster next time
balance_sheet.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store balance sheet as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store balance sheet period into the period container
period_container.append(statement_period)
# Message to warn that balance sheet may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' balance sheet not identified or not available: update balance sheet identifiers or pass')
############################ Cash Flow Statements ###################################
if (statement_type=='cashflow'):
cashflow_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''), cashflow_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in cashflow_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify cash flow statement and store in dataframe form
cashflow_statement=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store cash flow statement into the statement container
statement_container.append(cashflow_statement)
# Store cash flow statement period into the period container
period_container.append(statement_period)
# Serialize the cash flow statement dataframe into '.pickle'- to be accessed faster next time
cashflow_statement.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store cash flow statement as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store cash flow statement period into the period container
period_container.append(statement_period)
# Message to warn that cash flow statement may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' cashflow statement not identified or not available: update cash flow statement identifiers or pass')
fs.close() # close the downloaded '.xlsx' file
os.remove(file_name) # remove the downloaded '.xlsx' file after extracting financial statements
else:
print(self.symbol+' '+statement_period+' '+self.report_type+' '+statement_type+' statement not available')
        # Combine the company's income statement(s) or balance sheet(s) or cash flow statement(s), and statement periods into a dataframe
statement_df=pd.DataFrame({'statement_periods':period_container,statement_type+'_statement':statement_container},index=[self.symbol]*len(period_container))
# Return back to root_path (end statement piling)
os.chdir(root_path)
        # Return the data frame constructed above if it is not empty
if not statement_df.empty:
return statement_df
else:
return 'No '+self.report_type+' '+statement_type+' statement for '+self.symbol+' between '+self.start_period.strftime("%Y-%m-%d")+' and '+self.end_period.strftime("%Y-%m-%d")
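    # Usage sketch (requires network access; ticker and dates are illustrative):
    # download the XBRL Excel extracts, pickle the income statements under
    # ./statement_pile/<symbol>, and return a dataframe with 'statement_periods'
    # and 'income_statement' columns.
    #     income_df = Business(False, 'MSFT', 'annual', 20170101, 20201231).curate_financial_statements('income')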
#------------------------Extract Most Recent Income Statements--------------------------------
def ghost_income(self):
bin_path=r'.\\statement_pile\\'+self.symbol
if (os.path.isdir(bin_path)):
bin_files=os.listdir(bin_path)
pass
else:
os.makedirs(bin_path)
bin_files=os.listdir(bin_path)
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
if(self.report_type=='annual'):
if any(["AnnualIncome" in s for s in bin_files]):
annual_income_file=[s for s in bin_files if "AnnualIncome" in s]
annual_income_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),annual_income_file))
annual_income_file=[annual_income_file[i] for i in range(len(annual_income_file)) if annual_income_periods[i]>start_period and annual_income_periods[i]<=end_period]
annual_income_periods=[annual_income_periods[i] for i in range(len(annual_income_periods)) if annual_income_periods[i]>start_period and annual_income_periods[i]<=end_period]
annual_income_file.reverse()
annual_income_periods.reverse()
                # Try index combinations over the reversed (newest-first) file list,
                # from (0, 3, 6) down to (0,), and keep the first combination whose
                # pickled statements can be read and concatenated.
                binded_income=None
                binded_message='The specified time range is not available, try including a larger time range'
                for combo in [(0, 3, 6), (0, 3, 5), (0, 3, 4), (0, 3), (0, 2), (0, 1), (0,)]:
                    try:
                        binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[i]) for i in combo], axis = 1)
                        binded_message='Ghosted '+self.report_type+' income statements for '+', '.join([re.search('\d{4}-\d{2}-\d{2}',annual_income_file[i]).group() for i in combo])
                        break
                    except Exception:
                        continue
if(len(annual_income_periods)>0):
if(end_period-annual_income_periods[0]).days>365:
print('Recommend updating to the latest annual income statements: update via .update_financial_statements("income"), then call this function again')
else:
business_income=self.curate_financial_statements('income')
                # Same fallback order as above, but over the freshly curated
                # income statements instead of the pickled cache.
                binded_income=None
                binded_message='No '+self.report_type+' income statements for '+self.symbol+' between '+datetime.strptime(str(self.start_period),"%Y%m%d").strftime("%Y-%m-%d")+' and '+datetime.strptime(str(self.end_period),"%Y%m%d").strftime("%Y-%m-%d")
                for combo in [(0, 3, 6), (0, 3, 5), (0, 3, 4), (0, 3), (0, 2), (0, 1), (0,)]:
                    try:
                        binded_income=pd.concat([business_income.income_statement[i] for i in combo], axis = 1)
                        binded_message='Ghosted '+self.report_type+' income statements for '+', '.join([business_income.statement_periods[i] for i in combo])
                        break
                    except Exception:
                        continue
elif(self.report_type=='quarter'):
if any(["QuarterIncome" in s for s in bin_files]):
quarter_income_file=[s for s in bin_files if "QuarterIncome" in s]
quarter_income_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),quarter_income_file))
quarter_income_file=[quarter_income_file[i] for i in range(len(quarter_income_file)) if quarter_income_periods[i]>start_period and quarter_income_periods[i]<=end_period]
quarter_income_periods=[quarter_income_periods[i] for i in range(len(quarter_income_periods)) if quarter_income_periods[i]>start_period and quarter_income_periods[i]<=end_period]
quarter_income_file.reverse()
quarter_income_periods.reverse()
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+f) for f in quarter_income_file], axis = 1)
                    binded_message='Ghosted '+self.report_type+' income statements for '+', '.join([re.search('\d{4}-\d{2}-\d{2}',f).group() for f in quarter_income_file])
except:
binded_income=None
binded_message='The specified time range is not available, try including a larger time range'
if(len(quarter_income_periods)>0):
if(end_period-quarter_income_periods[0]).days>180:
print('Recommend updating to the latest quarter income statements: update via .update_financial_statements("income") function, then call this function again')
else:
business_income=self.curate_financial_statements('income')
try:
binded_income=pd.concat(business_income.income_statement.to_list(), axis = 1)
                    binded_message='Ghosted '+self.report_type+' income statements for '+', '.join([business_income.statement_periods[i] for i in range(len(business_income.statement_periods))])
except:
binded_income=None
binded_message='No '+self.report_type+' income statements for '+self.symbol+' between '+datetime.strptime(str(self.start_period),"%Y%m%d").strftime("%Y-%m-%d")+' and '+datetime.strptime(str(self.end_period),"%Y%m%d").strftime("%Y-%m-%d")
print(binded_message)
return binded_income
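    # Usage sketch (illustrative arguments): bind the most recent cached income
    # statements side by side, curating them first if the local statement_pile
    # cache is empty.
    #     latest_income = Business(False, 'MSFT', 'annual', 20170101, 20201231).ghost_income()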
#------------------------Extract Most Recent Balance Sheets--------------------------------
def ghost_balance(self):
bin_path=r'.\statement_pile\\'+self.symbol
if (os.path.isdir(bin_path)):
bin_files=os.listdir(bin_path)
pass
else:
os.makedirs(bin_path)
bin_files=os.listdir(bin_path)
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
if(self.report_type=='annual'):
if any(["AnnualBalance" in s for s in bin_files]):
annual_balance_file=[s for s in bin_files if "AnnualBalance" in s]
annual_balance_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),annual_balance_file))
annual_balance_file=[annual_balance_file[i] for i in range(len(annual_balance_file)) if annual_balance_periods[i]>start_period and annual_balance_periods[i]<=end_period]
annual_balance_periods=[annual_balance_periods[i] for i in range(len(annual_balance_periods)) if annual_balance_periods[i]>start_period and annual_balance_periods[i]<=end_period]
annual_balance_file.reverse()
annual_balance_periods.reverse()
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[6]), pd.read_pickle(bin_path+'\\'+annual_balance_file[8])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[6]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[8]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[6]), pd.read_pickle(bin_path+'\\'+annual_balance_file[7])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[6]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[7]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[6])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[6]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[5])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[5]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[3])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[3]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[1])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[1]).group()
except:
try:
binded_balance=pd.read_pickle(bin_path+'\\'+annual_balance_file[0])
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()
except:
binded_balance=None
binded_message='The specified time range is not available, try including a larger time range'
if(len(annual_balance_periods)>0):
if(end_period-annual_balance_periods[0]).days>365:
print('Recommend updating to the latest annual balance sheets: update via .update_financial_statements("balance") function, then call this function again')
else:
business_balance=self.curate_financial_statements('balance')
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[4],business_balance.balance_statement[6],business_balance.balance_statement[8]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[4]+', '+business_balance.statement_periods[6]+', '+business_balance.statement_periods[8]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[4],business_balance.balance_statement[6],business_balance.balance_statement[7]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[4]+', '+business_balance.statement_periods[6]+', '+business_balance.statement_periods[7]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[4],business_balance.balance_statement[6]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[4]+', '+business_balance.statement_periods[6]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[4],business_balance.balance_statement[5]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[4]+', '+business_balance.statement_periods[5]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[4]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[4]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2], business_balance.balance_statement[3]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]+', '+business_balance.statement_periods[3]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[2]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[2]
except:
try:
binded_balance=pd.concat([business_balance.balance_statement[0],business_balance.balance_statement[1]], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]+', '+business_balance.statement_periods[1]
except:
try:
binded_balance=business_balance.balance_statement[0]
binded_message='Ghosted '+self.report_type+' balance sheets for '+business_balance.statement_periods[0]
except:
binded_balance=None
binded_message='No '+self.report_type+' balance sheets for '+self.symbol+' between '+datetime.strptime(str(self.start_period),"%Y%m%d").strftime("%Y-%m-%d")+' and '+datetime.strptime(str(self.end_period),"%Y%m%d").strftime("%Y-%m-%d")
elif(self.report_type=='quarter'):
if any(["QuarterBalance" in s for s in bin_files]):
quarter_balance_file=[s for s in bin_files if "QuarterBalance" in s]
quarter_balance_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),quarter_balance_file))
quarter_balance_file=[quarter_balance_file[i] for i in range(len(quarter_balance_file)) if quarter_balance_periods[i]>start_period and quarter_balance_periods[i]<=end_period]
quarter_balance_periods=[quarter_balance_periods[i] for i in range(len(quarter_balance_periods)) if quarter_balance_periods[i]>start_period and quarter_balance_periods[i]<=end_period]
quarter_balance_file.reverse()
quarter_balance_periods.reverse()
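# Quarterly branch: same progressive fallback as the annual branch above, applied to the
# QuarterBalance pickles that fall inside the requested period range.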
try:
binded_balance=pd.concat([
|
pd.read_pickle(bin_path+'\\'+f)
|
pandas.read_pickle
|
# coding: utf-8
# In[1]:
import pandas as pd
import seaborn as sns
import sys
from matplotlib import pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
MIN_PYTHON = (3, 6)
if sys.version_info < MIN_PYTHON:
sys.exit("Python %s.%s or later is required.\n" % MIN_PYTHON)
# In[2]:
df1 = pd.read_csv('part1.log.csv',
index_col='TS',
names=['TS', 'Speed', 'TLD', 'Location'],
parse_dates=True)
df2 = pd.read_csv('part2.log.csv',
index_col='TS',
names=['TS', 'Speed', 'TLD', 'Location'],
parse_dates=True)
# remove data outside of our test date range
df1 = df1.loc['2017-11-20':'2017-11-22']
df2 = df2.loc['2017-11-20':'2017-11-22']
# In[3]:
# remove rows from TLDs with low occurrences
df1_c = df1[df1.groupby('TLD').Speed.transform(len) >= 10].copy(True)
df2_c = df2[df2.groupby('TLD').Speed.transform(len) >= 10].copy(True)
print(df1_c.count())
print(df2_c.count())
# The actual medians for comcast
# print(df1_c[df1_c['TLD'] == 'comcast.net'].median())
# print(df2_c[df2_c['TLD'] == 'comcast.net'].median())
# print(df1_c['TLD'].value_counts())
# print(df2_c['TLD'].value_counts())
# Filter by speed
# df1_c = df1_c[df1_c['Speed'] < 15000]
# df2_c = df2_c[df2_c['Speed'] < 15000]
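# Fixed TLD ordering shared by both boxplots so the two figures are directly comparable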
ranks = pd.Index(['comcast.net', 'cox.net', 'charter.com', 'rr.com', 'verizon.net', 'shawcable.net',
'virginm.net', 'qwest.net', 'btcentralplus.com',
't-ipconnect.de', 'sbcglobal.net'],
dtype='object', name='TLD')
FIG_WIDTH = 20
FIG_HEIGHT = 8
sns.set(font_scale=2)
sns.set_style("white")
sns.set_style({
'font.family': [u'sans-serif'],
'font.sans-serif': ['Chrono', 'DejaVu Sans']
})
fig, _ = plt.subplots()
fig.set_figwidth(FIG_WIDTH)
fig.set_figheight(FIG_HEIGHT)
fig.suptitle('Connection to Linode')
bp2 = sns.boxplot(data=df2_c,
y='TLD',
x='Speed',
orient='h',
order=ranks)
_ = bp2.set(xlim=(0, 30000))
fig, _ = plt.subplots()
fig.set_figwidth(FIG_WIDTH)
fig.set_figheight(FIG_HEIGHT)
fig.suptitle('Connection to Panic')
bp1 = sns.boxplot(data=df1_c,
y='TLD',
x='Speed',
orient='h',
order=ranks)
_ = bp1.set(xlim=(0, 30000))
# In[4]:
df1_c = df1.copy(True)
df2_c = df2.copy(True)
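# Hourly median Comcast speed for each connection; interpolate missing hours so the
# line plots below stay continuous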
df1_cc = df1_c[df1_c['TLD'] == 'comcast.net'].resample('h').median()
df2_cc = df2_c[df2_c['TLD'] == 'comcast.net'].resample('h').median()
df1_cc['Speed'].interpolate(inplace=True)
df2_cc['Speed'].interpolate(inplace=True)
fig, _ = plt.subplots()
fig.set_figwidth(FIG_WIDTH)
fig.set_figheight(FIG_HEIGHT)
p1 = df1_cc['Speed'].plot(label="Comcast")
_ = plt.legend()
_ = p1.set(ylim=(0, 20000))
fig, _ = plt.subplots()
fig.set_figwidth(FIG_WIDTH)
fig.set_figheight(FIG_HEIGHT)
p2 = df2_cc['Speed'].plot(label="Comcast")
_ = plt.legend()
_ = p2.set(ylim=(0, 20000))
# In[5]:
def get_dfs_filtered_by_time(df, label):
hour = df.index.hour
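# Split samples into two time-of-day windows: hours 15:00-00:59 -> "Evening", 01:00-14:59 -> "Morning"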
selector_l = ((15 <= hour) & (hour <= 23)) | ((0 <= hour) & (hour < 1))
selector_h = ((1 <= hour) & (hour < 15))
df_l = df[selector_l].assign(Timeframe=f'{label} Evening')
df_h = df[selector_h].assign(Timeframe=f'{label} Morning')
return df_l, df_h
def plot_by_tld(df1, df2, tld):
df1 = df1[df1['TLD'] == tld]
df2 = df2[df2['TLD'] == tld]
df1_l, df1_h = get_dfs_filtered_by_time(df1, 'Panic')
df2_l, df2_h = get_dfs_filtered_by_time(df2, 'Linode')
df_combined =
|
pd.concat([df1_l, df1_h, df2_l, df2_h])
|
pandas.concat
|
"""
YASA (Yet Another Spindle Algorithm): fast and robust detection of spindles,
slow-waves, and rapid eye movements from sleep EEG recordings.
- Author: <NAME> (www.raphaelvallat.com)
- GitHub: https://github.com/raphaelvallat/yasa
- License: BSD 3-Clause License
"""
import mne
import logging
import numpy as np
import pandas as pd
from scipy import signal
from mne.filter import filter_data
from collections import OrderedDict
from scipy.interpolate import interp1d
from scipy.fftpack import next_fast_len
from sklearn.ensemble import IsolationForest
from .spectral import stft_power
from .numba import _detrend, _rms
from .io import set_log_level, is_tensorpac_installed, is_pyriemann_installed
from .others import (moving_transform, trimbothstd, get_centered_indices,
sliding_window, _merge_close, _zerocrossings)
logger = logging.getLogger('yasa')
__all__ = ['art_detect', 'spindles_detect', 'SpindlesResults', 'sw_detect', 'SWResults',
'rem_detect', 'REMResults']
#############################################################################
# DATA PREPROCESSING
#############################################################################
def _check_data_hypno(data, sf=None, ch_names=None, hypno=None, include=None, check_amp=True):
"""Helper functions for preprocessing of data and hypnogram."""
# 1) Extract data as a 2D NumPy array
if isinstance(data, mne.io.BaseRaw):
sf = data.info['sfreq'] # Extract sampling frequency
ch_names = data.ch_names # Extract channel names
data = data.get_data() * 1e6 # Convert from V to uV
else:
assert sf is not None, 'sf must be specified if not using MNE Raw.'
data = np.asarray(data, dtype=np.float64)
assert data.ndim in [1, 2], 'data must be 1D (times) or 2D (chan, times).'
if data.ndim == 1:
# Force to 2D array: (n_chan, n_samples)
data = data[None, ...]
n_chan, n_samples = data.shape
# 2) Check channel names
if ch_names is None:
ch_names = ['CHAN' + str(i).zfill(3) for i in range(n_chan)]
else:
assert len(ch_names) == n_chan
# 3) Check hypnogram
if hypno is not None:
hypno = np.asarray(hypno, dtype=int)
assert hypno.ndim == 1, 'Hypno must be one dimensional.'
assert hypno.size == n_samples, 'Hypno must have same size as data.'
unique_hypno = np.unique(hypno)
logger.info('Number of unique values in hypno = %i', unique_hypno.size)
assert include is not None, 'include cannot be None if hypno is given'
include = np.atleast_1d(np.asarray(include))
assert include.size >= 1, '`include` must have at least one element.'
assert hypno.dtype.kind == include.dtype.kind, ('hypno and include must have same dtype')
assert np.in1d(hypno, include).any(), ('None of the stages specified '
'in `include` are present in '
'hypno.')
# 4) Check data amplitude
logger.info('Number of samples in data = %i', n_samples)
logger.info('Sampling frequency = %.2f Hz', sf)
logger.info('Data duration = %.2f seconds', n_samples / sf)
all_ptp = np.ptp(data, axis=-1)
all_trimstd = trimbothstd(data, cut=0.05)
bad_chan = np.zeros(n_chan, dtype=bool)
for i in range(n_chan):
logger.info('Trimmed standard deviation of %s = %.4f uV' % (ch_names[i], all_trimstd[i]))
logger.info('Peak-to-peak amplitude of %s = %.4f uV' % (ch_names[i], all_ptp[i]))
if check_amp and not(0.1 < all_trimstd[i] < 1e3):
logger.error('Wrong data amplitude for %s '
'(trimmed STD = %.3f). Unit of data MUST be uV! '
'Channel will be skipped.'
% (ch_names[i], all_trimstd[i]))
bad_chan[i] = True
# 5) Create sleep stage vector mask
if hypno is not None:
mask = np.in1d(hypno, include)
else:
mask = np.ones(n_samples, dtype=bool)
return (data, sf, ch_names, hypno, include, mask, n_chan, n_samples, bad_chan)
#############################################################################
# BASE DETECTION RESULTS CLASS
#############################################################################
class _DetectionResults(object):
"""Main class for detection results."""
def __init__(self, events, data, sf, ch_names, hypno, data_filt):
self._events = events
self._data = data
self._sf = sf
self._hypno = hypno
self._ch_names = ch_names
self._data_filt = data_filt
def _check_mask(self, mask):
assert isinstance(mask, (pd.Series, np.ndarray, list, type(None)))
n_events = self._events.shape[0]
if mask is None:
mask = np.ones(n_events, dtype="bool") # All set to True
else:
mask = np.asarray(mask)
assert mask.dtype.kind == "b", "Mask must be a boolean array."
assert mask.ndim == 1, "Mask must be one-dimensional"
assert mask.size == n_events, "Mask.size must be the number of detected events."
return mask
def summary(self, event_type, grp_chan=False, grp_stage=False, aggfunc='mean', sort=True,
mask=None):
"""Summary"""
# Check masking
mask = self._check_mask(mask)
# Define grouping
grouper = []
if grp_stage is True and 'Stage' in self._events:
grouper.append('Stage')
if grp_chan is True and 'Channel' in self._events:
grouper.append('Channel')
if not len(grouper):
# Return a copy of self._events after masking, without grouping
return self._events.loc[mask, :].copy()
if event_type == 'spindles':
aggdict = {'Start': 'count',
'Duration': aggfunc,
'Amplitude': aggfunc,
'RMS': aggfunc,
'AbsPower': aggfunc,
'RelPower': aggfunc,
'Frequency': aggfunc,
'Oscillations': aggfunc,
'Symmetry': aggfunc}
# if 'SOPhase' in self._events:
# from scipy.stats import circmean
# aggdict['SOPhase'] = lambda x: circmean(x, low=-np.pi, high=np.pi)
elif event_type == 'sw':
aggdict = {'Start': 'count',
'Duration': aggfunc,
'ValNegPeak': aggfunc,
'ValPosPeak': aggfunc,
'PTP': aggfunc,
'Slope': aggfunc,
'Frequency': aggfunc}
if 'PhaseAtSigmaPeak' in self._events:
from scipy.stats import circmean
aggdict['PhaseAtSigmaPeak'] = lambda x: circmean(x, low=-np.pi, high=np.pi)
aggdict['ndPAC'] = aggfunc
if "CooccurringSpindle" in self._events:
# We do not average "CooccurringSpindlePeak"
aggdict["CooccurringSpindle"] = aggfunc
aggdict["DistanceSpindleToSW"] = aggfunc
else: # REM
aggdict = {'Start': 'count',
'Duration': aggfunc,
'LOCAbsValPeak': aggfunc,
'ROCAbsValPeak': aggfunc,
'LOCAbsRiseSlope': aggfunc,
'ROCAbsRiseSlope': aggfunc,
'LOCAbsFallSlope': aggfunc,
'ROCAbsFallSlope': aggfunc}
# Apply grouping, after masking
df_grp = self._events.loc[mask, :].groupby(grouper, sort=sort, as_index=False).agg(aggdict)
df_grp = df_grp.rename(columns={'Start': 'Count'})
# Calculate density (= number per min of each stage)
if self._hypno is not None and grp_stage is True:
stages = np.unique(self._events['Stage'])
dur = {}
for st in stages:
# Get duration in minutes of each stage present in dataframe
dur[st] = self._hypno[self._hypno == st].size / (60 * self._sf)
# Insert new density column in grouped dataframe after count
df_grp.insert(
loc=df_grp.columns.get_loc('Count') + 1, column='Density',
value=df_grp.apply(lambda rw: rw['Count'] / dur[rw['Stage']], axis=1))
return df_grp.set_index(grouper)
def get_mask(self):
"""get_mask"""
from yasa.others import _index_to_events
mask = np.zeros(self._data.shape, dtype=int)
for i in self._events['IdxChannel'].unique():
ev_chan = self._events[self._events['IdxChannel'] == i]
idx_ev = _index_to_events(
ev_chan[['Start', 'End']].to_numpy() * self._sf)
mask[i, idx_ev] = 1
return np.squeeze(mask)
def get_sync_events(self, center, time_before, time_after, filt=(None, None), mask=None,
as_dataframe=True):
"""Get_sync_events (not for REM, spindles & SW only)"""
from yasa.others import get_centered_indices
assert time_before >= 0
assert time_after >= 0
bef = int(self._sf * time_before)
aft = int(self._sf * time_after)
# TODO: Step size is determined by sf: 0.01 sec at 100 Hz, 0.002 sec at
# 500 Hz, 0.00390625 sec at 256 Hz. Should we add resample=100 (Hz) or step_size=0.01?
time = np.arange(-bef, aft + 1, dtype='int') / self._sf
if any(filt):
data = mne.filter.filter_data(
self._data, self._sf, l_freq=filt[0], h_freq=filt[1], method='fir', verbose=False)
else:
data = self._data
# Apply mask
mask = self._check_mask(mask)
masked_events = self._events.loc[mask, :]
output = []
for i in masked_events['IdxChannel'].unique():
# Copy is required to merge with the stage later on
ev_chan = masked_events[masked_events['IdxChannel'] == i].copy()
ev_chan['Event'] = np.arange(ev_chan.shape[0])
peaks = (ev_chan[center] * self._sf).astype(int).to_numpy()
# Get centered indices
idx, idx_valid = get_centered_indices(data[i, :], peaks, bef, aft)
# If no good epochs are returned, log an error and skip the channel
if len(idx_valid) == 0:
logger.error(
'Time before and/or time after exceed data bounds, please '
'lower the temporal window around center. Skipping channel.')
continue
# Get data at indices and time vector
amps = data[i, idx]
if not as_dataframe:
# Output is a list (n_channels) of numpy arrays (n_events, n_times)
output.append(amps)
continue
# Convert to long-format dataframe
df_chan = pd.DataFrame(amps.T)
df_chan['Time'] = time
# Convert to long-format
df_chan = df_chan.melt(id_vars='Time', var_name='Event', value_name='Amplitude')
# Append stage
if 'Stage' in masked_events:
df_chan = df_chan.merge(ev_chan[['Event', 'Stage']].iloc[idx_valid])
# Append channel name
df_chan['Channel'] = ev_chan['Channel'].iloc[0]
df_chan['IdxChannel'] = i
# Append to master dataframe
output.append(df_chan)
if as_dataframe:
output = pd.concat(output, ignore_index=True)
return output
def get_coincidence_matrix(self, scaled=True):
"""get_coincidence_matrix"""
if len(self._ch_names) < 2:
raise ValueError("At least 2 channels are required to calculate coincidence.")
mask = self.get_mask()
mask = pd.DataFrame(mask.T, columns=self._ch_names)
mask.columns.name = "Channel"
def _coincidence(x, y):
"""Calculate the (scaled) coincidence."""
coincidence = (x * y).sum()
if scaled:
# Handle division by zero error
denom = (x.sum() * y.sum())
if denom == 0:
coincidence = np.nan
else:
coincidence /= denom
return coincidence
coinc_mat = mask.corr(method=_coincidence)
if not scaled:
# Otherwise diagonal values are set to 1
np.fill_diagonal(coinc_mat.values, mask.sum())
coinc_mat = coinc_mat.astype(int)
return coinc_mat
def plot_average(self, event_type, center='Peak', hue='Channel', time_before=1,
time_after=1, filt=(None, None), mask=None, figsize=(6, 4.5), **kwargs):
"""Plot the average event (not for REM, spindles & SW only)"""
import seaborn as sns
import matplotlib.pyplot as plt
df_sync = self.get_sync_events(center=center, time_before=time_before,
time_after=time_after, filt=filt, mask=mask)
assert not df_sync.empty, "Could not calculate event-locked data."
assert hue in ['Stage', 'Channel'], "hue must be 'Channel' or 'Stage'"
assert hue in df_sync.columns, "%s is not present in data." % hue
if event_type == 'spindles':
title = "Average spindle"
else: # "sw":
title = "Average SW"
# Start figure
fig, ax = plt.subplots(1, 1, figsize=figsize)
sns.lineplot(data=df_sync, x='Time', y='Amplitude', hue=hue, ax=ax, **kwargs)
# ax.legend(frameon=False, loc='lower right')
ax.set_xlim(df_sync['Time'].min(), df_sync['Time'].max())
ax.set_title(title)
ax.set_xlabel('Time (sec)')
ax.set_ylabel('Amplitude (uV)')
return ax
def plot_detection(self):
"""Plot an overlay of the detected events on the signal."""
import matplotlib.pyplot as plt
import ipywidgets as ipy
# Define mask
sf = self._sf
win_size = 10
mask = self.get_mask()
highlight = self._data * mask
highlight = np.where(highlight == 0, np.nan, highlight)
highlight_filt = self._data_filt * mask
highlight_filt = np.where(highlight_filt == 0, np.nan, highlight_filt)
n_epochs = int((self._data.shape[-1] / sf) / win_size)
times = np.arange(self._data.shape[-1]) / sf
# Define xlim and xrange
xlim = [0, win_size]
xrng = np.arange(xlim[0] * sf, (xlim[1] * sf + 1), dtype=int)
# Plot
fig, ax = plt.subplots(figsize=(12, 4))
plt.plot(times[xrng], self._data[0, xrng], 'k', lw=1)
plt.plot(times[xrng], highlight[0, xrng], 'indianred')
plt.xlabel('Time (seconds)')
plt.ylabel('Amplitude (uV)')
fig.canvas.header_visible = False
fig.tight_layout()
# WIDGETS
layout = ipy.Layout(
width="50%",
justify_content='center',
align_items='center'
)
sl_ep = ipy.IntSlider(
min=0,
max=n_epochs,
step=1,
value=0,
layout=layout,
description="Epoch:",
)
sl_amp = ipy.IntSlider(
min=25,
max=500,
step=25,
value=150,
layout=layout,
orientation='horizontal',
description="Amplitude:"
)
dd_ch = ipy.Dropdown(
options=self._ch_names, value=self._ch_names[0],
description='Channel:'
)
dd_win = ipy.Dropdown(
options=[1, 5, 10, 30, 60],
value=win_size,
description='Window size:',
)
dd_check = ipy.Checkbox(
value=False,
description='Filtered',
)
def update(epoch, amplitude, channel, win_size, filt):
"""Update plot."""
n_epochs = int((self._data.shape[-1] / sf) / win_size)
sl_ep.max = n_epochs
xlim = [epoch * win_size, (epoch + 1) * win_size]
xrng = np.arange(xlim[0] * sf, (xlim[1] * sf), dtype=int)
# Check if filtered
data = self._data if not filt else self._data_filt
overlay = highlight if not filt else highlight_filt
try:
ax.lines[0].set_data(times[xrng], data[dd_ch.index, xrng])
ax.lines[1].set_data(times[xrng], overlay[dd_ch.index, xrng])
ax.set_xlim(xlim)
except IndexError:
pass
ax.set_ylim([-amplitude, amplitude])
return ipy.interact(update, epoch=sl_ep, amplitude=sl_amp,
channel=dd_ch, win_size=dd_win, filt=dd_check)
#############################################################################
# SPINDLES DETECTION
#############################################################################
def spindles_detect(data, sf=None, ch_names=None, hypno=None,
include=(1, 2, 3), freq_sp=(12, 15), freq_broad=(1, 30),
duration=(0.5, 2), min_distance=500,
thresh={'rel_pow': 0.2, 'corr': 0.65, 'rms': 1.5},
multi_only=False, remove_outliers=False, verbose=False):
"""Spindles detection.
Parameters
----------
data : array_like
Single or multi-channel data. Unit must be uV and shape (n_samples) or
(n_chan, n_samples). Can also be a :py:class:`mne.io.BaseRaw`,
in which case ``data``, ``sf``, and ``ch_names`` will be automatically
extracted, and ``data`` will also be automatically converted from
Volts (MNE) to micro-Volts (YASA).
sf : float
Sampling frequency of the data in Hz.
Can be omitted if ``data`` is a :py:class:`mne.io.BaseRaw`.
.. tip:: If the detection is taking too long, make sure to downsample
your data to 100 Hz (or 128 Hz). For more details, please refer to
:py:func:`mne.filter.resample`.
ch_names : list of str
Channel names. Can be omitted if ``data`` is a
:py:class:`mne.io.BaseRaw`.
hypno : array_like
Sleep stage (hypnogram). If the hypnogram is loaded, the
detection will only be applied to the value defined in
``include`` (default = N1 + N2 + N3 sleep).
The hypnogram must have the same number of samples as ``data``.
To upsample your hypnogram, please refer to
:py:func:`yasa.hypno_upsample_to_data`.
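For instance, assuming the hypnogram was scored in 30-second epochs and is
stored in a (hypothetical) ``hypno_30s`` vector, one possible call is:
.. code-block:: python
hypno = yasa.hypno_upsample_to_data(hypno_30s, sf_hypno=1/30, data=data, sf_data=sf)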
.. note::
The default hypnogram format in YASA is a 1D integer
vector where:
- -2 = Unscored
- -1 = Artefact / Movement
- 0 = Wake
- 1 = N1 sleep
- 2 = N2 sleep
- 3 = N3 sleep
- 4 = REM sleep
include : tuple, list or int
Values in ``hypno`` that will be included in the mask. The default is
(1, 2, 3), meaning that the detection is applied on N1, N2 and N3
sleep. This has no effect when ``hypno`` is None.
freq_sp : tuple or list
Spindles frequency range. Default is 12 to 15 Hz. Please note that YASA
uses a FIR filter (implemented in MNE) with a 1.5Hz transition band,
which means that for `freq_sp = (12, 15 Hz)`, the -6 dB points are
located at 11.25 and 15.75 Hz.
freq_broad : tuple or list
Broad band frequency range. Default is 1 to 30 Hz.
duration : tuple or list
The minimum and maximum duration of the spindles.
Default is 0.5 to 2 seconds.
min_distance : int
If two spindles are closer than ``min_distance`` (in ms), they are
merged into a single spindle. Default is 500 ms.
thresh : dict
Detection thresholds:
* ``'rel_pow'``: Relative power (= power ratio freq_sp / freq_broad).
* ``'corr'``: Moving correlation between original signal and
sigma-filtered signal.
* ``'rms'``: Number of standard deviations above the mean of a moving
root mean square of sigma-filtered signal.
You can disable one or more thresholds by putting ``None`` instead:
.. code-block:: python
thresh = {'rel_pow': None, 'corr': 0.65, 'rms': 1.5}
thresh = {'rel_pow': None, 'corr': None, 'rms': 3}
multi_only : boolean
Define the behavior of the multi-channel detection. If True, only
spindles that are present on at least two channels are kept. If False,
no selection is applied and the output is just a concatenation of the
single-channel detection dataframe. Default is False.
remove_outliers : boolean
If True, YASA will automatically detect and remove outlier spindles
using :py:class:`sklearn.ensemble.IsolationForest`.
The outliers detection is performed on all the spindles
parameters with the exception of the ``Start``, ``Peak``, ``End``,
``Stage``, and ``SOPhase`` columns.
YASA uses a random seed (42) to ensure reproducible results.
Note that this step will only be applied if there are more than 50
detected spindles in the first place. Default is False.
verbose : bool or str
Verbose level. Default (False) will only print warning and error
messages. The logging levels are 'debug', 'info', 'warning', 'error',
and 'critical'. For most users the choice is between 'info'
(or ``verbose=True``) and warning (``verbose=False``).
.. versionadded:: 0.2.0
Returns
-------
sp : :py:class:`yasa.SpindlesResults`
To get the full detection dataframe, use:
>>> sp = spindles_detect(...)
>>> sp.summary()
This will give a :py:class:`pandas.DataFrame` where each row is a
detected spindle and each column is a parameter (= feature or property)
of this spindle. To get the average spindles parameters per channel and
sleep stage:
>>> sp.summary(grp_chan=True, grp_stage=True)
Notes
-----
The parameters that are calculated for each spindle are:
* ``'Start'``: Start time of the spindle, in seconds from the beginning of
data.
* ``'Peak'``: Time at the most prominent spindle peak (in seconds).
* ``'End'`` : End time (in seconds).
* ``'Duration'``: Duration (in seconds)
* ``'Amplitude'``: Peak-to-peak amplitude of the (detrended) spindle in
the raw data (in µV).
* ``'RMS'``: Root-mean-square (in µV)
* ``'AbsPower'``: Median absolute power (in log10 µV^2),
calculated from the Hilbert-transform of the ``freq_sp`` filtered signal.
* ``'RelPower'``: Median relative power of the ``freq_sp`` band in spindle
calculated from a short-term fourier transform and expressed as a
proportion of the total power in ``freq_broad``.
* ``'Frequency'``: Median instantaneous frequency of spindle (in Hz),
derived from a Hilbert transform of the ``freq_sp`` filtered signal.
* ``'Oscillations'``: Number of oscillations (= number of positive peaks
in spindle.)
* ``'Symmetry'``: Location of the most prominent peak of spindle,
normalized from 0 (start) to 1 (end). Ideally this value should be close
to 0.5, indicating that the most prominent peak is halfway through the
spindle.
* ``'Stage'`` : Sleep stage during which spindle occurred, if ``hypno``
was provided.
All parameters are calculated from the broadband-filtered EEG
(frequency range defined in ``freq_broad``).
For better results, apply this detection only on artefact-free NREM sleep.
.. warning::
A critical bug was fixed in YASA 0.6.1, in which the number of detected spindles could
vary drastically depending on the sampling frequency of the data. Please make sure to check
any results obtained with this function prior to the 0.6.1 release.
References
----------
The sleep spindles detection algorithm is based on:
* <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.
(2018). `A sleep spindle detection algorithm that emulates human expert
spindle scoring. <https://doi.org/10.1016/j.jneumeth.2018.08.014>`_
Journal of Neuroscience Methods.
Examples
--------
For a walkthrough of the spindles detection, please refer to the following
Jupyter notebooks:
https://github.com/raphaelvallat/yasa/blob/master/notebooks/01_spindles_detection.ipynb
https://github.com/raphaelvallat/yasa/blob/master/notebooks/02_spindles_detection_multi.ipynb
https://github.com/raphaelvallat/yasa/blob/master/notebooks/03_spindles_detection_NREM_only.ipynb
https://github.com/raphaelvallat/yasa/blob/master/notebooks/04_spindles_slow_fast.ipynb
"""
set_log_level(verbose)
(data, sf, ch_names, hypno, include, mask, n_chan, n_samples, bad_chan
) = _check_data_hypno(data, sf, ch_names, hypno, include)
# If all channels are bad
if sum(bad_chan) == n_chan:
logger.warning('All channels have bad amplitude. Returning None.')
return None
# Check detection thresholds
if 'rel_pow' not in thresh.keys():
thresh['rel_pow'] = 0.20
if 'corr' not in thresh.keys():
thresh['corr'] = 0.65
if 'rms' not in thresh.keys():
thresh['rms'] = 1.5
do_rel_pow = thresh['rel_pow'] not in [None, "none", "None"]
do_corr = thresh['corr'] not in [None, "none", "None"]
do_rms = thresh['rms'] not in [None, "none", "None"]
n_thresh = sum([do_rel_pow, do_corr, do_rms])
assert n_thresh >= 1, 'At least one threshold must be defined.'
# Filtering
nfast = next_fast_len(n_samples)
# 1) Broadband bandpass filter (optional -- careful of lower freq for PAC)
data_broad = filter_data(data, sf, freq_broad[0], freq_broad[1], method='fir', verbose=0)
# 2) Sigma bandpass filter
# The width of the transition band is set to 1.5 Hz on each side,
# meaning that for freq_sp = (12, 15 Hz), the -6 dB points are located at
# 11.25 and 15.75 Hz.
data_sigma = filter_data(
data, sf, freq_sp[0], freq_sp[1], l_trans_bandwidth=1.5, h_trans_bandwidth=1.5,
method='fir', verbose=0)
# Hilbert power (to define the instantaneous frequency / power)
analytic = signal.hilbert(data_sigma, N=nfast)[:, :n_samples]
inst_phase = np.angle(analytic)
inst_pow = np.square(np.abs(analytic))
inst_freq = (sf / (2 * np.pi) * np.diff(inst_phase, axis=-1))
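# Instantaneous frequency (Hz) = sample-to-sample phase difference scaled by sf / (2 * pi)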
# Extract the SO signal for coupling
# if coupling:
# # We need to use the original (non-filtered data)
# data_so = filter_data(data, sf, freq_so[0], freq_so[1], method='fir',
# l_trans_bandwidth=0.1, h_trans_bandwidth=0.1,
# verbose=0)
# # Now extract the instantaneous phase using Hilbert transform
# so_phase = np.angle(signal.hilbert(data_so, N=nfast)[:, :n_samples])
# Initialize empty output dataframe
df = pd.DataFrame()
for i in range(n_chan):
# ####################################################################
# START SINGLE CHANNEL DETECTION
# ####################################################################
# First, skip channels with bad data amplitude
if bad_chan[i]:
continue
# Compute the pointwise relative power using interpolated STFT
# Here we use a step of 200 ms to speed up the computation.
# Note that even if the threshold is None we still need to calculate it
# for the individual spindles parameter (RelPow).
f, t, Sxx = stft_power(
data_broad[i, :], sf, window=2, step=.2, band=freq_broad, interp=False, norm=True)
idx_sigma = np.logical_and(f >= freq_sp[0], f <= freq_sp[1])
rel_pow = Sxx[idx_sigma].sum(0)
# Let's interpolate `rel_pow` to get one value per sample
# Note that we could also have use the `interp=True` in the
# `stft_power` function, however 2D interpolation is much slower than
# 1D interpolation.
func = interp1d(t, rel_pow, kind='cubic', bounds_error=False, fill_value=0)
t = np.arange(n_samples) / sf
rel_pow = func(t)
if do_corr:
_, mcorr = moving_transform(x=data_sigma[i, :], y=data_broad[i, :], sf=sf, window=.3,
step=.1, method='corr', interp=True)
if do_rms:
_, mrms = moving_transform(x=data_sigma[i, :], sf=sf, window=.3, step=.1, method='rms',
interp=True)
# Let's define the thresholds
if hypno is None:
thresh_rms = mrms.mean() + thresh['rms'] * trimbothstd(mrms, cut=0.10)
else:
thresh_rms = mrms[mask].mean() + thresh['rms'] * trimbothstd(mrms[mask], cut=0.10)
# Avoid too high threshold caused by Artefacts / Motion during Wake
thresh_rms = min(thresh_rms, 10)
logger.info('Moving RMS threshold = %.3f', thresh_rms)
# Boolean vector of supra-threshold indices
idx_sum = np.zeros(n_samples)
if do_rel_pow:
idx_rel_pow = (rel_pow >= thresh['rel_pow']).astype(int)
idx_sum += idx_rel_pow
logger.info('N supra-threshold relative power = %i', idx_rel_pow.sum())
if do_corr:
idx_mcorr = (mcorr >= thresh['corr']).astype(int)
idx_sum += idx_mcorr
logger.info('N supra-threshold moving corr = %i', idx_mcorr.sum())
if do_rms:
idx_mrms = (mrms >= thresh_rms).astype(int)
idx_sum += idx_mrms
logger.info('N supra-threshold moving RMS = %i', idx_mrms.sum())
# Make sure that we do not detect spindles outside mask
if hypno is not None:
idx_sum[~mask] = 0
# The detection using the three thresholds tends to underestimate the
# real duration of the spindle. To overcome this, we compute a soft
# threshold by smoothing the idx_sum vector with a ~100 ms window.
# Sampling frequency = 100 Hz --> w = 10 samples
# Sampling frequency = 256 Hz --> w = 25 samples = 97 ms
w = int(0.1 * sf)
# Critical bugfix March 2022, see https://github.com/raphaelvallat/yasa/pull/55
idx_sum = np.convolve(idx_sum, np.ones(w), mode='same') / w
# And we then find indices that are strictly greater than 2, i.e. we
# find the 'true' beginning and 'true' end of the events by finding
# where at least two out of the three thresholds were crossed.
where_sp = np.where(idx_sum > (n_thresh - 1))[0]
# If no events are found, skip to next channel
if not len(where_sp):
logger.warning('No spindles were found in channel %s.', ch_names[i])
continue
# Merge events that are too close
if min_distance is not None and min_distance > 0:
where_sp = _merge_close(where_sp, min_distance, sf)
# Extract start, end, and duration of each spindle
sp = np.split(where_sp, np.where(np.diff(where_sp) != 1)[0] + 1)
idx_start_end = np.array([[k[0], k[-1]] for k in sp]) / sf
sp_start, sp_end = idx_start_end.T
sp_dur = sp_end - sp_start
# Find events with bad duration
good_dur = np.logical_and(sp_dur > duration[0], sp_dur < duration[1])
# If no events of good duration are found, skip to next channel
if all(~good_dur):
logger.warning('No spindles were found in channel %s.', ch_names[i])
continue
# Initialize empty variables
sp_amp = np.zeros(len(sp))
sp_freq = np.zeros(len(sp))
sp_rms = np.zeros(len(sp))
sp_osc = np.zeros(len(sp))
sp_sym = np.zeros(len(sp))
sp_abs = np.zeros(len(sp))
sp_rel = np.zeros(len(sp))
sp_sta = np.zeros(len(sp))
sp_pro = np.zeros(len(sp))
# sp_cou = np.zeros(len(sp))
# Number of oscillations (number of peaks separated by at least 60 ms)
# --> 60 ms because 1000 ms / 16 Hz = 62.5 ms, in other words, at 16 Hz,
# peaks are separated by 62.5 ms. At 11 Hz peaks are separated by 90 ms
distance = 60 * sf / 1000
for j in np.arange(len(sp))[good_dur]:
# Important: detrend the signal to avoid wrong PTP amplitude
sp_x = np.arange(data_broad[i, sp[j]].size, dtype=np.float64)
sp_det = _detrend(sp_x, data_broad[i, sp[j]])
# sp_det = signal.detrend(data_broad[i, sp[i]], type='linear')
sp_amp[j] = np.ptp(sp_det) # Peak-to-peak amplitude
sp_rms[j] = _rms(sp_det) # Root mean square
sp_rel[j] = np.median(rel_pow[sp[j]]) # Median relative power
# Hilbert-based instantaneous properties
sp_inst_freq = inst_freq[i, sp[j]]
sp_inst_pow = inst_pow[i, sp[j]]
sp_abs[j] = np.median(np.log10(sp_inst_pow[sp_inst_pow > 0]))
sp_freq[j] = np.median(sp_inst_freq[sp_inst_freq > 0])
# Number of oscillations
peaks, peaks_params = signal.find_peaks(
sp_det, distance=distance, prominence=(None, None))
sp_osc[j] = len(peaks)
# For frequency and amplitude, we can also optionally use these
# faster alternatives. If we use them, we do not need to compute
# the Hilbert transform of the filtered signal.
# sp_freq[j] = sf / np.mean(np.diff(peaks))
# sp_amp[j] = peaks_params['prominences'].max()
# Peak location & symmetry index
# pk is expressed in sample since the beginning of the spindle
pk = peaks[peaks_params['prominences'].argmax()]
sp_pro[j] = sp_start[j] + pk / sf
sp_sym[j] = pk / sp_det.size
# SO-spindles coupling
# if coupling:
# sp_cou[j] = so_phase[i, sp[j]][pk]
# Sleep stage
if hypno is not None:
sp_sta[j] = hypno[sp[j]][0]
# Create a dataframe
sp_params = {'Start': sp_start,
'Peak': sp_pro,
'End': sp_end,
'Duration': sp_dur,
'Amplitude': sp_amp,
'RMS': sp_rms,
'AbsPower': sp_abs,
'RelPower': sp_rel,
'Frequency': sp_freq,
'Oscillations': sp_osc,
'Symmetry': sp_sym,
# 'SOPhase': sp_cou,
'Stage': sp_sta}
df_chan = pd.DataFrame(sp_params)[good_dur]
# We need at least 50 detected spindles to apply the Isolation Forest.
if remove_outliers and df_chan.shape[0] >= 50:
col_keep = ['Duration', 'Amplitude', 'RMS', 'AbsPower', 'RelPower',
'Frequency', 'Oscillations', 'Symmetry']
ilf = IsolationForest(
contamination='auto', max_samples='auto', verbose=0, random_state=42)
good = ilf.fit_predict(df_chan[col_keep])
good[good == -1] = 0
logger.info('%i outliers were removed in channel %s.'
% ((good == 0).sum(), ch_names[i]))
# Remove outliers from DataFrame
df_chan = df_chan[good.astype(bool)]
logger.info('%i spindles were found in channel %s.'
% (df_chan.shape[0], ch_names[i]))
# ####################################################################
# END SINGLE CHANNEL DETECTION
# ####################################################################
df_chan['Channel'] = ch_names[i]
df_chan['IdxChannel'] = i
df = pd.concat([df, df_chan], axis=0, ignore_index=True)
# If no spindles were detected, return None
if df.empty:
logger.warning('No spindles were found in data. Returning None.')
return None
# Remove useless columns
to_drop = []
if hypno is None:
to_drop.append('Stage')
else:
df['Stage'] = df['Stage'].astype(int)
# if not coupling:
# to_drop.append('SOPhase')
if len(to_drop):
df = df.drop(columns=to_drop)
# Find spindles that are present on at least two channels
if multi_only and df['Channel'].nunique() > 1:
# We round to the nearest second
idx_good = np.logical_or(
df['Start'].round(0).duplicated(keep=False),
df['End'].round(0).duplicated(keep=False)).to_list()
df = df[idx_good].reset_index(drop=True)
return SpindlesResults(events=df, data=data, sf=sf, ch_names=ch_names,
hypno=hypno, data_filt=data_sigma)
class SpindlesResults(_DetectionResults):
"""Output class for spindles detection.
Attributes
----------
_events : :py:class:`pandas.DataFrame`
Output detection dataframe
_data : array_like
Original EEG data of shape *(n_chan, n_samples)*.
_data_filt : array_like
Sigma-filtered EEG data of shape *(n_chan, n_samples)*.
_sf : float
Sampling frequency of data.
_ch_names : list
Channel names.
_hypno : array_like or None
Sleep staging vector.
"""
def __init__(self, events, data, sf, ch_names, hypno, data_filt):
super().__init__(events, data, sf, ch_names, hypno, data_filt)
def summary(self, grp_chan=False, grp_stage=False, mask=None, aggfunc='mean', sort=True):
"""Return a summary of the spindles detection, optionally grouped
across channels and/or stage.
Parameters
----------
grp_chan : bool
If True, group by channel (for multi-channels detection only).
grp_stage : bool
If True, group by sleep stage (provided that an hypnogram was
used).
mask : array_like or None
Custom boolean mask. Only the detected events for which mask is True will be
included in the summary dataframe. Default is None, i.e. no masking
(all events are included).
aggfunc : str or function
Averaging function (e.g. ``'mean'`` or ``'median'``).
sort : bool
If True, sort group keys when grouping.
"""
return super().summary(event_type='spindles', grp_chan=grp_chan, grp_stage=grp_stage,
aggfunc=aggfunc, sort=sort, mask=mask)
def get_coincidence_matrix(self, scaled=True):
"""Return the (scaled) coincidence matrix.
Parameters
----------
scaled : bool
If True (default), the coincidence matrix is scaled (see Notes).
Returns
-------
coincidence : pd.DataFrame
A symmetric matrix with the (scaled) coincidence values.
Notes
-----
Do spindles occur at the same time? One way to measure this is to
calculate the coincidence matrix, which gives, for each pair of
channel, the number of samples that were marked as a spindle in both
channels. The output is a symmetric matrix, in which the diagonal is
simply the number of data points that were marked as a spindle in the
channel.
The coincidence matrix can be scaled (default) by dividing the output
by the product of the sum of each individual binary mask, as shown in
the example below. It can then be used to define functional
networks or quickly find outlier channels.
Examples
--------
Calculate the coincidence of two binary mask:
>>> import numpy as np
>>> x = np.array([0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1])
>>> y = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1])
>>> x * y
array([0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1])
>>> (x * y).sum() # Unscaled coincidence
3
>>> (x * y).sum() / (x.sum() * y.sum()) # Scaled coincidence
0.12
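A minimal call on an existing multi-channel detection (assuming ``sp`` is the
object returned by :py:func:`yasa.spindles_detect`):
>>> cm_scaled = sp.get_coincidence_matrix()
>>> cm_counts = sp.get_coincidence_matrix(scaled=False)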
References
----------
- https://github.com/Mark-Kramer/Sleep-Networks-2021
"""
return super().get_coincidence_matrix(scaled=scaled)
def get_mask(self):
"""Return a boolean array indicating for each sample in data if this
sample is part of a detected event (True) or not (False).
"""
return super().get_mask()
def get_sync_events(self, center='Peak', time_before=1, time_after=1, filt=(None, None),
mask=None, as_dataframe=True):
"""
Return the raw or filtered data of each detected event after
centering to a specific timepoint.
Parameters
----------
center : str
Landmark of the event to synchronize the timing on.
Default is to use the center peak of the spindles.
time_before : float
Time (in seconds) before ``center``.
time_after : float
Time (in seconds) after ``center``.
filt : tuple
Optional filtering to apply to data. For instance, ``filt=(1, 30)``
will apply a 1 to 30 Hz bandpass filter, and ``filt=(None, 40)``
will apply a 40 Hz lowpass filter. Filtering is done using default
parameters in the :py:func:`mne.filter.filter_data` function.
mask : array_like or None
Custom boolean mask. Only the detected events for which mask is True will be
included. Default is None, i.e. no masking (all events are included).
as_dataframe : boolean
If True (default), returns a long-format pandas dataframe. If False, returns a list of
numpy arrays. Each element of the list is a unique channel, and the shape of the numpy
arrays within the list is (n_events, n_times).
Returns
-------
df_sync : :py:class:`pandas.DataFrame`
Output long-format dataframe (if ``as_dataframe=True``)::
'Event' : Event number
'Time' : Timing of the events (in seconds)
'Amplitude' : Raw or filtered data for event
'Channel' : Channel
'IdxChannel' : Index of channel in data
'Stage': Sleep stage in which the events occurred (if available)
"""
return super().get_sync_events(center=center, time_before=time_before,
time_after=time_after, filt=filt, mask=mask,
as_dataframe=as_dataframe)
def plot_average(self, center='Peak', hue='Channel', time_before=1,
time_after=1, filt=(None, None), mask=None, figsize=(6, 4.5), **kwargs):
"""
Plot the average spindle.
Parameters
----------
center : str
Landmark of the event to synchronize the timing on.
Default is to use the most prominent peak of the spindle.
hue : str
Grouping variable that will produce lines with different colors.
Can be either 'Channel' or 'Stage'.
time_before : float
Time (in seconds) before ``center``.
time_after : float
Time (in seconds) after ``center``.
filt : tuple
Optional filtering to apply to data. For instance, ``filt=(12, 16)``
will apply a 12 to 16 Hz bandpass filter, and ``filt=(None, 40)``
will apply a 40 Hz lowpass filter. Filtering is done using the default
parameters in the :py:func:`mne.filter.filter_data` function.
mask : array_like or None
Custom boolean mask. Only the detected events for which mask is True will be
plotted. Default is None, i.e. no masking (all events are included).
figsize : tuple
Figure size in inches.
**kwargs : dict
Optional argument that are passed to :py:func:`seaborn.lineplot`.
"""
return super().plot_average(event_type='spindles', center=center,
hue=hue, time_before=time_before,
time_after=time_after, filt=filt, mask=mask,
figsize=figsize, **kwargs)
def plot_detection(self):
"""Plot an overlay of the detected spindles on the EEG signal.
This only works in Jupyter and it requires the ipywidgets
(https://ipywidgets.readthedocs.io/en/latest/) package.
To activate the interactive mode, make sure to run:
>>> %matplotlib widget
.. versionadded:: 0.4.0
"""
return super().plot_detection()
#############################################################################
# SLOW-WAVES DETECTION
#############################################################################
def sw_detect(data, sf=None, ch_names=None, hypno=None, include=(2, 3), freq_sw=(0.3, 1.5),
dur_neg=(0.3, 1.5), dur_pos=(0.1, 1), amp_neg=(40, 200), amp_pos=(10, 150),
amp_ptp=(75, 350), coupling=False,
coupling_params={"freq_sp": (12, 16), "time": 1, "p": 0.05},
remove_outliers=False, verbose=False):
"""Slow-waves detection.
Parameters
----------
data : array_like
Single or multi-channel data. Unit must be uV and shape (n_samples) or
(n_chan, n_samples). Can also be a :py:class:`mne.io.BaseRaw`,
in which case ``data``, ``sf``, and ``ch_names`` will be automatically
extracted, and ``data`` will also be automatically converted from
Volts (MNE) to micro-Volts (YASA).
sf : float
Sampling frequency of the data in Hz.
Can be omitted if ``data`` is a :py:class:`mne.io.BaseRaw`.
.. tip:: If the detection is taking too long, make sure to downsample
your data to 100 Hz (or 128 Hz). For more details, please refer to
:py:func:`mne.filter.resample`.
ch_names : list of str
Channel names. Can be omitted if ``data`` is a
:py:class:`mne.io.BaseRaw`.
hypno : array_like
Sleep stage (hypnogram). If the hypnogram is loaded, the
detection will only be applied to the value defined in
``include`` (default = N2 + N3 sleep).
The hypnogram must have the same number of samples as ``data``.
To upsample your hypnogram, please refer to
:py:func:`yasa.hypno_upsample_to_data`.
.. note::
The default hypnogram format in YASA is a 1D integer
vector where:
- -2 = Unscored
- -1 = Artefact / Movement
- 0 = Wake
- 1 = N1 sleep
- 2 = N2 sleep
- 3 = N3 sleep
- 4 = REM sleep
include : tuple, list or int
Values in ``hypno`` that will be included in the mask. The default is
(2, 3), meaning that the detection is applied on N2 and N3
sleep. This has no effect when ``hypno`` is None.
freq_sw : tuple or list
Slow wave frequency range. Default is 0.3 to 1.5 Hz. Please note that
YASA uses a FIR filter (implemented in MNE) with a 0.2 Hz transition
band, which means that the -6 dB points are located at 0.2 and 1.6 Hz.
dur_neg : tuple or list
The minimum and maximum duration of the negative deflection of the
slow wave. Default is 0.3 to 1.5 second.
dur_pos : tuple or list
The minimum and maximum duration of the positive deflection of the
slow wave. Default is 0.1 to 1 second.
amp_neg : tuple or list
Absolute minimum and maximum negative trough amplitude of the
slow-wave. Default is 40 uV to 200 uV. Can also be in units of standard
deviations if the data has been previously z-scored. If you do not want
to specify any negative amplitude thresholds,
use ``amp_neg=(None, None)``.
amp_pos : tuple or list
Absolute minimum and maximum positive peak amplitude of the
slow-wave. Default is 10 uV to 150 uV. Can also be in units of standard
deviations if the data has been previously z-scored.
If you do not want to specify any positive amplitude thresholds,
use ``amp_pos=(None, None)``.
amp_ptp : tuple or list
Minimum and maximum peak-to-peak amplitude of the slow-wave.
Default is 75 uV to 350 uV. Can also be in units of standard
deviations if the data has been previously z-scored.
Use ``np.inf`` to set no upper amplitude threshold
(e.g. ``amp_ptp=(75, np.inf)``).
coupling : boolean
If True, YASA will also calculate the phase-amplitude coupling between
the slow-waves phase and the spindles-related sigma band
amplitude. Specifically, the following columns will be added to the
output dataframe:
1. ``'SigmaPeak'``: The location (in seconds) of the maximum sigma peak amplitude within a
2-seconds epoch centered around the negative peak (trough) of the current slow-wave.
2. ``PhaseAtSigmaPeak``: the phase of the bandpass-filtered slow-wave signal (in radians)
at ``'SigmaPeak'``.
Importantly, since ``PhaseAtSigmaPeak`` is expressed in radians, one should use circular
statistics to calculate the mean direction and vector length:
.. code-block:: python
import pingouin as pg
mean_direction = pg.circ_mean(sw['PhaseAtSigmaPeak'])
vector_length = pg.circ_r(sw['PhaseAtSigmaPeak'])
3. ``ndPAC``: the normalized Mean Vector Length (also called the normalized direct PAC,
or ndPAC) within a 2-sec epoch centered around the negative peak of the slow-wave.
The lower and upper frequencies for the slow-waves and spindles-related sigma signals are
defined in ``freq_sw`` and ``coupling_params['freq_sp']``, respectively.
For more details, please refer to the `Jupyter notebook
<https://github.com/raphaelvallat/yasa/blob/master/notebooks/12_SO-sigma_coupling.ipynb>`_
Note that setting ``coupling=True`` may increase computation time.
.. versionadded:: 0.2.0
coupling_params : dict
Parameters for the phase-amplitude coupling.
* ``freq_sp`` is a tuple or list that defines the spindles-related frequency of interest.
The default is 12 to 16 Hz, with a wide transition bandwidth of 1.5 Hz.
* ``time`` is an int or a float that defines the time around the negative peak of each
detected slow-waves, in seconds. For example, a value of 1 means that the coupling will
be calculated for each slow-waves using a 2-seconds epoch centered around the negative
peak of the slow-waves (i.e. 1 second on each side).
* ``p`` is a parameter passed to the :py:func:`tensorpac.methods.norm_direct_pac`
function. It represents the p-value to use for thresholding of unreliable coupling
values. Sub-threshold PAC values will be set to 0. To disable this behavior (no masking),
use ``p=1`` or ``p=None``.
.. versionadded:: 0.6.0
remove_outliers : boolean
If True, YASA will automatically detect and remove outliers slow-waves
using :py:class:`sklearn.ensemble.IsolationForest`.
The outliers detection is performed on the frequency, amplitude and
duration parameters of the detected slow-waves. YASA uses a random seed
(42) to ensure reproducible results. Note that this step will only be
applied if there are more than 50 detected slow-waves in the first
place. Default is False.
verbose : bool or str
Verbose level. Default (False) will only print warning and error
messages. The logging levels are 'debug', 'info', 'warning', 'error',
and 'critical'. For most users the choice is between 'info'
(or ``verbose=True``) and warning (``verbose=False``).
.. versionadded:: 0.2.0
Returns
-------
sw : :py:class:`yasa.SWResults`
To get the full detection dataframe, use:
>>> sw = sw_detect(...)
>>> sw.summary()
This will give a :py:class:`pandas.DataFrame` where each row is a
detected slow-wave and each column is a parameter (= property).
To get the average SW parameters per channel and sleep stage:
>>> sw.summary(grp_chan=True, grp_stage=True)
Notes
-----
The parameters that are calculated for each slow-wave are:
* ``'Start'``: Start time of each detected slow-wave, in seconds from the beginning of data.
* ``'NegPeak'``: Location of the negative peak (in seconds)
* ``'MidCrossing'``: Location of the negative-to-positive zero-crossing (in seconds)
* ``'PosPeak'``: Location of the positive peak (in seconds)
* ``'End'``: End time (in seconds)
* ``'Duration'``: Duration (in seconds)
* ``'ValNegPeak'``: Amplitude of the negative peak (in uV, calculated on the ``freq_sw``
bandpass-filtered signal)
* ``'ValPosPeak'``: Amplitude of the positive peak (in uV, calculated on the ``freq_sw``
bandpass-filtered signal)
* ``'PTP'``: Peak-to-peak amplitude (= ``ValPosPeak`` - ``ValNegPeak``, calculated on the
``freq_sw`` bandpass-filtered signal)
* ``'Slope'``: Slope between ``NegPeak`` and ``MidCrossing`` (in uV/sec, calculated on the
``freq_sw`` bandpass-filtered signal)
* ``'Frequency'``: Frequency of the slow-wave (= 1 / ``Duration``)
* ``'SigmaPeak'``: Location of the sigma peak amplitude within a 2-sec epoch centered around
the negative peak of the slow-wave. This is only calculated when ``coupling=True``.
* ``'PhaseAtSigmaPeak'``: SW phase at max sigma amplitude within a 2-sec epoch centered around
the negative peak of the slow-wave. This is only calculated when ``coupling=True``
* ``'ndPAC'``: Normalized direct PAC within a 2-sec epoch centered around the negative peak
of the slow-wave. This is only calculated when ``coupling=True``
* ``'Stage'``: Sleep stage (only if hypno was provided)
.. image:: https://raw.githubusercontent.com/raphaelvallat/yasa/master/docs/pictures/slow_waves.png # noqa
:width: 500px
:align: center
:alt: slow-wave
For better results, apply this detection only on artefact-free NREM sleep.
References
----------
The slow-waves detection algorithm is based on:
* <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2004). `The sleep slow
oscillation as a traveling wave. <https://doi.org/10.1523/JNEUROSCI.1318-04.2004>`_. The
Journal of Neuroscience, 24(31), 6862–6870.
* <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2011). `Sleep slow wave changes during the
middle years of life. <https://doi.org/10.1111/j.1460-9568.2010.07543.x>`_. The European
Journal of Neuroscience, 33(4), 758–766.
Examples
--------
For an example of how to run the detection, please refer to the tutorial:
https://github.com/raphaelvallat/yasa/blob/master/notebooks/05_sw_detection.ipynb
"""
set_log_level(verbose)
(data, sf, ch_names, hypno, include, mask, n_chan, n_samples, bad_chan
) = _check_data_hypno(data, sf, ch_names, hypno, include)
# If all channels are bad
if sum(bad_chan) == n_chan:
logger.warning('All channels have bad amplitude. Returning None.')
return None
# Define time vector
times = np.arange(data.size) / sf
idx_mask = np.where(mask)[0]
# Bandpass filter
nfast = next_fast_len(n_samples)
data_filt = filter_data(
data, sf, freq_sw[0], freq_sw[1], method='fir', verbose=0, l_trans_bandwidth=0.2,
h_trans_bandwidth=0.2)
# Extract the spindles-related sigma signal for coupling
if coupling:
is_tensorpac_installed()
import tensorpac.methods as tpm
# The width of the transition band is set to 1.5 Hz on each side,
# meaning that for freq_sp = (12, 15 Hz), the -6 dB points are located
# at 11.25 and 15.75 Hz. The frequency band for the amplitude signal
# must be large enough to fit the sidebands caused by the assumed
# modulating lower frequency band (Aru et al. 2015).
# https://doi.org/10.1016/j.conb.2014.08.002
assert isinstance(coupling_params, dict)
assert "freq_sp" in coupling_params.keys()
assert "time" in coupling_params.keys()
assert "p" in coupling_params.keys()
freq_sp = coupling_params['freq_sp']
data_sp = filter_data(
data, sf, freq_sp[0], freq_sp[1], method='fir', l_trans_bandwidth=1.5,
h_trans_bandwidth=1.5, verbose=0)
# Now extract the instantaneous phase/amplitude using Hilbert transform
sw_pha = np.angle(signal.hilbert(data_filt, N=nfast)[:, :n_samples])
sp_amp = np.abs(signal.hilbert(data_sp, N=nfast)[:, :n_samples])
# Initialize empty output dataframe
df = pd.DataFrame()
for i in range(n_chan):
# ####################################################################
# START SINGLE CHANNEL DETECTION
# ####################################################################
# First, skip channels with bad data amplitude
if bad_chan[i]:
continue
# Find peaks in data
# Negative peaks whose absolute amplitude falls within the amp_neg range (default 40-200 uV)
idx_neg_peaks, _ = signal.find_peaks(-1 * data_filt[i, :], height=amp_neg)
# Positive peaks whose amplitude falls within the amp_pos range (default 10-150 uV)
idx_pos_peaks, _ = signal.find_peaks(data_filt[i, :], height=amp_pos)
# Intersect with sleep stage vector
idx_neg_peaks = np.intersect1d(idx_neg_peaks, idx_mask, assume_unique=True)
idx_pos_peaks = np.intersect1d(idx_pos_peaks, idx_mask, assume_unique=True)
# If no peaks are detected, return None
if len(idx_neg_peaks) == 0 or len(idx_pos_peaks) == 0:
logger.warning('No SW were found in channel %s.', ch_names[i])
continue
# Make sure that the last detected peak is a positive one
if idx_pos_peaks[-1] < idx_neg_peaks[-1]:
# If not, append a fake positive peak one sample after the last neg
idx_pos_peaks = np.append(idx_pos_peaks, idx_neg_peaks[-1] + 1)
# For each negative peak, we find the closest following positive peak
pk_sorted = np.searchsorted(idx_pos_peaks, idx_neg_peaks)
closest_pos_peaks = idx_pos_peaks[pk_sorted] - idx_neg_peaks
closest_pos_peaks = closest_pos_peaks[np.nonzero(closest_pos_peaks)]
idx_pos_peaks = idx_neg_peaks + closest_pos_peaks
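# Worked example: with idx_pos_peaks = [10, 40, 90] and idx_neg_peaks = [5, 50],
# searchsorted gives pk_sorted = [0, 2], so each negative peak is paired with the
# next positive peak, i.e. the matched positive peaks become [10, 90].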
# Now we compute the PTP amplitude and keep only the good peaks
sw_ptp = (np.abs(data_filt[i, idx_neg_peaks]) + data_filt[i, idx_pos_peaks])
good_ptp = np.logical_and(sw_ptp > amp_ptp[0], sw_ptp < amp_ptp[1])
# If good_ptp is all False
if all(~good_ptp):
logger.warning('No SW were found in channel %s.', ch_names[i])
continue
sw_ptp = sw_ptp[good_ptp]
idx_neg_peaks = idx_neg_peaks[good_ptp]
idx_pos_peaks = idx_pos_peaks[good_ptp]
# Now we need to check the negative and positive phase duration
# For that we need to compute the zero crossings of the filtered signal
zero_crossings = _zerocrossings(data_filt[i, :])
# Make sure that there is a zero-crossing after the last detected peak
if zero_crossings[-1] < max(idx_pos_peaks[-1], idx_neg_peaks[-1]):
# If not, append the index of the last peak
zero_crossings = np.append(zero_crossings, max(idx_pos_peaks[-1], idx_neg_peaks[-1]))
# Find distance to previous and following zc
neg_sorted = np.searchsorted(zero_crossings, idx_neg_peaks)
previous_neg_zc = zero_crossings[neg_sorted - 1] - idx_neg_peaks
following_neg_zc = zero_crossings[neg_sorted] - idx_neg_peaks
# Distance between the positive peaks and the previous and
# following zero-crossings
pos_sorted = np.searchsorted(zero_crossings, idx_pos_peaks)
previous_pos_zc = zero_crossings[pos_sorted - 1] - idx_pos_peaks
following_pos_zc = zero_crossings[pos_sorted] - idx_pos_peaks
# Duration of the negative and positive phases, in seconds
neg_phase_dur = (np.abs(previous_neg_zc) + following_neg_zc) / sf
pos_phase_dur = (np.abs(previous_pos_zc) + following_pos_zc) / sf
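# Worked example: at sf = 100 Hz, a negative peak at sample 500 with surrounding
# zero-crossings at samples 450 and 570 gives neg_phase_dur = (50 + 70) / 100 = 1.2 s.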
# We now compute a set of metrics
sw_start = times[idx_neg_peaks + previous_neg_zc]
sw_end = times[idx_pos_peaks + following_pos_zc]
# This should be the same as `sw_dur = pos_phase_dur + neg_phase_dur`
# We round to avoid floating point error (e.g. 1.9000000002)
sw_dur = (sw_end - sw_start).round(4)
sw_dur_both_phase = (pos_phase_dur + neg_phase_dur).round(4)
sw_midcrossing = times[idx_neg_peaks + following_neg_zc]
sw_idx_neg = times[idx_neg_peaks] # Location of negative peak
sw_idx_pos = times[idx_pos_peaks] # Location of positive peak
# Slope between peak trough and midcrossing
sw_slope = sw_ptp / (sw_midcrossing - sw_idx_neg)
# Hypnogram
if hypno is not None:
sw_sta = hypno[idx_neg_peaks]
else:
sw_sta = np.zeros(sw_dur.shape)
# And we apply a set of thresholds to remove bad slow waves
good_sw = np.logical_and.reduce((
# Data edges
previous_neg_zc != 0,
following_neg_zc != 0,
previous_pos_zc != 0,
following_pos_zc != 0,
# Duration criteria
sw_dur == sw_dur_both_phase, # dur = negative + positive
sw_dur <= dur_neg[1] + dur_pos[1], # dur < max(neg) + max(pos)
sw_dur >= dur_neg[0] + dur_pos[0], # dur > min(neg) + min(pos)
neg_phase_dur > dur_neg[0],
neg_phase_dur < dur_neg[1],
pos_phase_dur > dur_pos[0],
pos_phase_dur < dur_pos[1],
# Sanity checks
sw_midcrossing > sw_start,
sw_midcrossing < sw_end,
sw_slope > 0,
))
if all(~good_sw):
logger.warning('No SW were found in channel %s.', ch_names[i])
continue
# Filter good events
idx_neg_peaks = idx_neg_peaks[good_sw]
idx_pos_peaks = idx_pos_peaks[good_sw]
sw_start = sw_start[good_sw]
sw_idx_neg = sw_idx_neg[good_sw]
sw_midcrossing = sw_midcrossing[good_sw]
sw_idx_pos = sw_idx_pos[good_sw]
sw_end = sw_end[good_sw]
sw_dur = sw_dur[good_sw]
sw_ptp = sw_ptp[good_sw]
sw_slope = sw_slope[good_sw]
sw_sta = sw_sta[good_sw]
# Create a dictionary
sw_params = OrderedDict({
'Start': sw_start,
'NegPeak': sw_idx_neg,
'MidCrossing': sw_midcrossing,
'PosPeak': sw_idx_pos,
'End': sw_end,
'Duration': sw_dur,
'ValNegPeak': data_filt[i, idx_neg_peaks],
'ValPosPeak': data_filt[i, idx_pos_peaks],
'PTP': sw_ptp,
'Slope': sw_slope,
'Frequency': 1 / sw_dur,
'Stage': sw_sta,
})
# Add the phase (in radians) of the slow-oscillation signal at the maximum
# spindles-related sigma amplitude within an epoch of +/- coupling_params['time']
# seconds centered on the negative peak.
if coupling:
# Get phase and amplitude for each centered epoch
time_before = time_after = coupling_params['time']
assert float(sf * time_before).is_integer(), (
"Invalid time parameter for coupling. Must be a whole number of samples.")
bef = int(sf * time_before)
aft = int(sf * time_after)
# Center of each epoch is defined as the negative peak of the SW
n_peaks = idx_neg_peaks.shape[0]
# idx.shape = (len(idx_valid), bef + aft + 1)
idx, idx_valid = get_centered_indices(data[i, :], idx_neg_peaks, bef, aft)
sw_pha_ev = sw_pha[i, idx]
sp_amp_ev = sp_amp[i, idx]
# 1) Find location of max sigma amplitude in epoch
idx_max_amp = sp_amp_ev.argmax(axis=1)
# Now we need to append it back to the original unmasked shape
# to avoid error when idx.shape[0] != idx_valid.shape, i.e.
# some epochs were out of data bounds.
sw_params['SigmaPeak'] = np.ones(n_peaks) * np.nan
# Timestamp at sigma peak, expressed in seconds from negative peak
# e.g. -0.39, 0.5, 1, 2 -- limits are [time_before, time_after]
time_sigpk = (idx_max_amp - bef) / sf
# convert to absolute time from beginning of the recording
# time_sigpk only includes valid epoch
time_sigpk_abs = sw_idx_neg[idx_valid] + time_sigpk
sw_params['SigmaPeak'][idx_valid] = time_sigpk_abs
# 2) PhaseAtSigmaPeak
# Find SW phase at max sigma amplitude in epoch
pha_at_max = np.squeeze(np.take_along_axis(sw_pha_ev, idx_max_amp[..., None], axis=1))
sw_params['PhaseAtSigmaPeak'] = np.ones(n_peaks) * np.nan
sw_params['PhaseAtSigmaPeak'][idx_valid] = pha_at_max
# 3) Normalized Direct PAC, with thresholding
# Unreliable values are set to 0
ndp = np.squeeze(tpm.norm_direct_pac(
sw_pha_ev[None, ...], sp_amp_ev[None, ...], p=coupling_params['p']))
sw_params['ndPAC'] = np.ones(n_peaks) * np.nan
sw_params['ndPAC'][idx_valid] = ndp
# Make sure that Stage is the last column of the dataframe
sw_params.move_to_end('Stage')
# Convert to dataframe, keeping only good events
df_chan = pd.DataFrame(sw_params)
# Remove all duplicates
df_chan = df_chan.drop_duplicates(subset=['Start'], keep=False)
df_chan = df_chan.drop_duplicates(subset=['End'], keep=False)
# We need at least 50 detected slow waves to apply the Isolation Forest
if remove_outliers and df_chan.shape[0] >= 50:
col_keep = ['Duration', 'ValNegPeak', 'ValPosPeak', 'PTP', 'Slope', 'Frequency']
ilf = IsolationForest(contamination='auto', max_samples='auto',
verbose=0, random_state=42)
good = ilf.fit_predict(df_chan[col_keep])
good[good == -1] = 0
logger.info('%i outliers were removed in channel %s.'
% ((good == 0).sum(), ch_names[i]))
# Remove outliers from DataFrame
df_chan = df_chan[good.astype(bool)]
logger.info('%i slow-waves were found in channel %s.'
% (df_chan.shape[0], ch_names[i]))
# ####################################################################
# END SINGLE CHANNEL DETECTION
# ####################################################################
df_chan['Channel'] = ch_names[i]
df_chan['IdxChannel'] = i
df = pd.concat([df, df_chan], axis=0, ignore_index=True)
import osmnx as ox
import networkx as nx
import pandas as pd
import geopandas as gpd
from tqdm import tqdm
from shapely.geometry import shape, Polygon, Point
import warnings
warnings.filterwarnings(action='ignore', message='Mean of empty slice')
import numpy as np
def bikeability(place, scale = 'city',data = False):
''' A function that calculates a bikeability value for a given
place of interest.
Parameters
place: the place of interest, e.g. "Freiburg, Germany", datatype = string
scale: can be either "grid" or "city", default is "city", datatype = string
data: if True, returns a dataframe along with the standard dictionary
output, datatype = boolean
Returns the average_index for bikeability (a number between 0 and 100) and some
summary statistics of the index, datatype = dictionary, or dataframe and dictionary
if data is set to True.
Usage example
a = bikeability('Freiburg, Germany', scale ='grid', data = False) ... for grid scale approach
a,b = bikeability('Freiburg, Germany', scale ='grid', data = True)
a =bikeability('Freiburg, Germany', scale = 'city')... for city scale approach
a,b =bikeability('Freiburg, Germany', scale = 'city', data = True)
'''
if scale != 'grid':
place = place
# Create and set osmnx to select important tags
useful_tags_way = ['bridge', 'length', 'oneway', 'lanes', 'ref', 'name',
'highway', 'maxspeed', 'service', 'access', 'area', 'cycleway',
'landuse', 'width', 'est_width', 'junction', 'surface']
ox.utils.config(useful_tags_way = useful_tags_way) # was useful_tags_path in older osmnx versions
# Create basic city graph
place_name = place
graph = ox.graph_from_place(place_name, network_type='all', retain_all=True)
# # Calculate and add edge closeness centrality(connectedness)
centrality = nx.degree_centrality(nx.line_graph(graph))
nx.set_edge_attributes(graph, centrality, 'centrality')
# Extract nodes and edges to geopandas from graph
#edges = ox.graph_to_gdfs(graph, nodes=False)
try:
edges = ox.graph_to_gdfs(graph, nodes= False)
pass
except Exception as e:
print('{} at {}'.format(e, place))
# Remove unwanted columns and add weight variable
cols = ['highway', 'cycleway', 'surface', 'maxspeed', 'length', 'lanes', 'oneway',
'width', 'centrality', 'geometry']
try:
df = edges.loc[:,cols]
except KeyError as e:
print (e)
# Set appropriate data types
df['maxspeed'] = pd.to_numeric(
df['maxspeed'], errors='coerce', downcast='integer')
df['lanes'] = pd.to_numeric(
df['lanes'], errors='coerce', downcast='integer')
df['width'] = pd.to_numeric(
df['width'], errors='coerce', downcast='unsigned')
df['highway'] = df['highway'].astype(str)
df['surface'] = df['surface'].astype(str)
df['oneway'] = df['oneway'].astype(int)
df['cycleway'] = df['cycleway'].astype(str)
# Dataframe cleaning and preprocessing
# highway column
df['highway'] = df['highway'].str.replace(r'[^\w\s-]', '', regex = True)
highway_cols = (pd.DataFrame(df.highway.str.split(' ', expand=True)))
highway_map = ({'service': 6, 'None': np.nan, 'residential': 8, 'unclassified': 7, 'footway': 7, 'track': 5,
'tertiary': 6, 'living_street': 9, 'path': 5, 'pedestrian': 7, 'secondary': 5,
'primary': 2, 'steps': 2, 'cycleway': 10, 'rest_area': 5, 'primary_link': 2, 'ferry': 1,
'construction': 2, 'byway': 8, 'bridleway': 6, 'trunk': 2, 'trunk_link': 2, 'motorway': 1, 'motorway_link': 1})
for column in highway_cols:
highway_cols[column] = highway_cols[column].map(highway_map)
highway_cols['mean'] = np.nanmean(highway_cols, axis=1)
df['highway'] = round(highway_cols['mean'])
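# Worked example: an edge tagged 'residential service' is split into ['residential', 'service'],
# mapped to [8, 6], averaged to 7.0 and rounded to a final highway score of 7.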
# cycleway column
df['cycleway'] = df['cycleway'].str.replace(r'[^\w\s-]', '', regex = True)
cycleway_cols = (pd.DataFrame(df.cycleway.str.split(' ', expand=True)))
cycleway_map = ({'opposite': 9, 'lane': 9, 'share_busway': 8, 'shared_lane': 8, 'segregated': 10,
'no': 1, 'opposite_lane': 9, 'crossing': 10, 'track': 10, 'designated': 10,
'opposite_share_busway': 8, 'seperate': 10, 'shoulder': 8})
for column in cycleway_cols:
cycleway_cols[column] = cycleway_cols[column].map(cycleway_map)
cycleway_cols['mean'] = np.nanmean(cycleway_cols, axis=1)
df['cycleway'] = round(cycleway_cols['mean'])
# surface column
df['surface'] = df['surface'].str.replace(r'[^\w\s-]', '', regex=True)
surface_cols = (pd.DataFrame(df.surface.str.split(' ', expand=True)))
surface_map = ({'asphalt': 10, 'paved': 10, 'cobblestone': 5, 'fine_gravel': 9,
'ground': 7, 'sett': 6, 'gravel': 7, 'metal': 6, 'compacted': 10,
'dirt': 6, 'paving_stones': 7, 'grass_paver': 5, 'unpaved': 8,
'pebblestone': 9, 'concrete': 10, 'grass': 5, 'mud': 1})
for column in surface_cols:
surface_cols[column] = surface_cols[column].map(surface_map)
surface_cols['mean'] = np.nanmean(surface_cols, axis=1)
df['surface'] = round(surface_cols['mean'])
# maxspeed column
df.loc[df['maxspeed'] > 110, 'maxspeed'] = 110
df.loc[df['maxspeed'] < 20, 'maxspeed'] = 20
maxspeed_map = ({20: 10, 30: 9, 40: 8, 50: 7, 60: 6,
70: 5, 80: 4, 90: 3, 100: 2, 110: 1})
df['maxspeed'] = df['maxspeed'].map(maxspeed_map)
# lanes column
df.loc[df['lanes'] > 8, 'lanes'] = 8
lanes_map = {1: 10, 2: 9, 3: 5, 4: 5, 5: 3, 6: 3, 7: 2, 8: 1}
df['lanes'] = df['lanes'].map(lanes_map)
# oneway column
oneway_map = {0: 5, 1: 10, -1: 5}
df['oneway'] = df['oneway'].map(oneway_map)
# width column
df.loc[df['width'] < 2, 'width'] = 1
df.loc[df['width'] > 6, 'width'] = 6
df['width'] = round(df['width'])
width_map = ({1: 1, 2: 2, 3: 5, 4: 7, 5: 9, 6: 10})
df['width'] = df['width'].map(width_map)
# normalize centrality column (between 0 and 10)
df['centrality'] = ((df['centrality'] - np.min(df['centrality'])) /
(np.max(df['centrality']) - np.min(df['centrality']))) * 10
# Switch to new df for calculation
d_frame = df.copy(deep=True)
# Multiply variables by weights
d_frame['cycleway'] = d_frame['cycleway'] * 0.208074534
d_frame['surface'] = d_frame['surface'] * 0.108695652
d_frame['highway'] = d_frame['highway'] * 0.167701863
d_frame['maxspeed'] = d_frame['maxspeed'] * 0.189440994
d_frame['lanes'] = d_frame['lanes'] * 0.108695652
d_frame['centrality'] = d_frame['centrality'] * 0.071428571
d_frame['width'] = d_frame['width'] * 0.086956522
d_frame['oneway'] = d_frame['oneway'] * 0.059006211
# Combine the weighted variables into a bikeability index (0-100 scale)
d_frame['index'] = (np.nanmean(d_frame[['cycleway', 'highway', 'surface', 'maxspeed', 'lanes', 'width', 'oneway',
'centrality']], axis=1, dtype='float64')) * 80
# Final statistics index of city
mean_index = np.average(d_frame['index'], weights=d_frame['length'])
max_index = d_frame['index'].max()
min_index = d_frame['index'].min()
std_index = d_frame['index'].std()
# Plot result
#d_frame.plot(column = 'index',legend = True)
# Result dictionary
result = ({'place': place, 'average_index': mean_index, 'max_index': max_index,
'min_index': min_index, 'std_index': std_index})
else:
#Get bounding box for place
place_name = place
area = ox.geocode_to_gdf(place_name) # graph first
xmin,ymin,xmax,ymax = area.total_bounds
#divide into grids x = lon, y = lat
height = 0.041667
width = 0.041667
rows = int(np.ceil((ymax-ymin) / height))
cols = int(np.ceil((xmax-xmin) / width))
XleftOrigin = xmin
XrightOrigin = xmin + width
YtopOrigin = ymax
YbottomOrigin = ymax- height
polygons = []
for i in range(cols):
Ytop = YtopOrigin
Ybottom =YbottomOrigin
for j in range(rows):
polygons.append(Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]))
Ytop = Ytop - height
Ybottom = Ybottom - height
XleftOrigin = XleftOrigin + width
XrightOrigin = XrightOrigin + width
#Ensure the grids are within the polygon
grid_list = []
for i in range(len(polygons)):
p = Point(polygons[i].centroid.x, polygons[i].centroid.y)
geome = shape(polygons[i])
q =gpd.GeoDataFrame({'geometry':geome}, index=[0])
q = q.set_crs("EPSG:4326")
if area.geometry.iloc[0].contains(polygons[i])== True:
grid_list.append(q)
#elif p.within(area.geometry.iloc[0]) == True and area.geometry.iloc[0].contains(polygons[i])== False:
elif area.geometry.iloc[0].intersects(polygons[i]):
#grid_list.append(polygons[i])
clip = gpd.clip(area, q)
grid_list.append(clip)
#Initialize important variables
dflist = []
exception_grids = []
dfs = []
for i in tqdm(range(len(grid_list))):
#graph
useful_tags_way = ['bridge', 'length', 'oneway', 'lanes', 'ref',
'name', 'highway', 'maxspeed', 'surface', 'area',
'landuse', 'width', 'est_width', 'junction','cycleway']
ox.utils.config(useful_tags_way = useful_tags_way) # was useful_tags_path in older osmnx versions
try:
box_graph =ox.graph_from_polygon(grid_list[i].geometry.iloc[0], network_type='bike',retain_all=True)
pass
except Exception as e:
print('{} at grid {}, skip grid'.format(e, i+1))
exception_grids.append(i+1)
continue
# Calculate and add edge closeness centrality(connectedness)
centrality = nx.degree_centrality(nx.line_graph(box_graph))
nx.set_edge_attributes(box_graph, centrality, 'centrality')
# Extract nodes and edges to geopandas from graph
try:
edges = ox.graph_to_gdfs(box_graph, nodes= False)
pass
except Exception as e:
print('{} at grid {}, skip grid'.format(e, i+1))
exception_grids.append(i+1)
continue
# Select only the important variables
cols = ['highway','cycleway', 'surface', 'maxspeed', 'length', 'lanes', 'oneway',
'width', 'centrality', 'geometry']
try:
df = edges.loc[:,cols]
pass
except KeyError as e:
print('{} at grid {}, skip grid'.format(e, i+1))
exception_grids.append(i+1)
continue
# Set appropriate data types
df['maxspeed'] = pd.to_numeric(
df['maxspeed'], errors='coerce', downcast='integer')
df['lanes'] = pd.to_numeric(
df['lanes'], errors='coerce', downcast='integer')
df['width'] = pd.to_numeric(
df['width'], errors='coerce', downcast='unsigned')
df['highway'] = df['highway'].astype(str)
df['surface'] = df['surface'].astype(str)
df['oneway'] = df['oneway'].astype(int)
df['cycleway'] = df['cycleway'].astype(str)
# Dataframe cleaning and preprocessing
# highway column
df['highway'] = df['highway'].str.replace(r'[^\w\s-]', '', regex = True)
highway_cols = (pd.DataFrame(df.highway.str.split(' ', expand = True)))
highway_map = ({'service': 6, 'None': np.nan, 'residential': 8, 'unclassified': 7, 'footway': 7, 'track': 5, 'tertiary_link':6,
'tertiary': 6, 'living_street': 9, 'path': 5, 'pedestrian': 7, 'secondary': 5, 'secondary_link':5,
'primary': 2, 'steps': 2, 'cycleway': 10, 'rest_area': 5, 'primary_link': 2, 'ferry': 1,
'construction': 2, 'byway': 8, 'bridleway': 6, 'trunk': 2, 'trunk_link': 2, 'motorway': 1, 'motorway_link': 1})
for column in highway_cols:
highway_cols[column] = highway_cols[column].map(highway_map)
highway_cols['mean'] = np.nanmean(highway_cols, axis=1)
df['highway'] = round(highway_cols['mean'])
#cycleway column
df['cycleway'] = df['cycleway'].str.replace(r'[^\w\s-]', '', regex = True)
cycleway_cols = (pd.DataFrame(df.cycleway.str.split(' ', expand = True)))
cycleway_map = ({'opposite':9, 'lane':9, 'share_busway':8, 'shared_lane':8,'segregated':10,
'no':1, 'opposite_lane':9, 'crossing':10, 'track':10, 'designated':10,
'opposite_share_busway':8, 'seperate':10, 'shoulder':8})
for column in cycleway_cols:
cycleway_cols[column] = cycleway_cols[column].map(cycleway_map)
cycleway_cols['mean'] = np.nanmean(cycleway_cols, axis=1)
df['cycleway'] = round(cycleway_cols['mean'])
# surface column
df['surface'] = df['surface'].str.replace(r'[^\w\s-]', '', regex = True) #''
surface_cols = (pd.DataFrame(df.surface.str.split(' ', expand = True)))
surface_map = ({'asphalt': 10, 'paved': 10, 'cobblestone': 3, 'fine_gravel': 9,
'ground': 6, 'sett': 4, 'gravel': 7, 'metal': 7, 'compacted': 9,
'dirt': 6, 'paving_stones': 7, 'grass_paver': 4, 'unpaved': 7,
'pebblestone': 7, 'concrete': 10, 'grass': 5, 'mud': 2,'sand':5,
'wood':4, 'earth':6, 'woodchips':3, 'snow':2, 'ice':2, 'salt':2})
for column in surface_cols:
surface_cols[column] = surface_cols[column].map(surface_map)
surface_cols['mean'] = np.nanmean(surface_cols, axis=1)
df['surface'] = round(surface_cols['mean'])
# maxspeed column
df.loc[df['maxspeed'] > 110, 'maxspeed'] = 110
df.loc[df['maxspeed'] < 20, 'maxspeed'] = 20
df['maxspeed'] = round(df['maxspeed'], -1)
maxspeed_map = ({20: 10, 30: 9, 40: 8, 50: 7, 60: 6,
70: 5, 80: 4, 90: 3, 100: 2, 110: 1})
df['maxspeed'] = df['maxspeed'].map(maxspeed_map)
# lanes column
df.loc[df['lanes'] > 8, 'lanes'] = 8
lanes_map = {1: 10, 2: 9, 3: 5, 4: 5, 5: 3, 6: 3, 7: 2, 8: 1}
df['lanes'] = df['lanes'].map(lanes_map)
# oneway column
oneway_map = {0: 5, 1: 10, -1:5}
df['oneway'] = df['oneway'].map(oneway_map)
# width column
df.loc[df['width'] < 2, 'width'] = 1
df.loc[df['width'] > 6, 'width'] = 6
df['width'] = round(df['width'])
width_map = ({1: 1, 2: 2, 3: 5, 4: 7, 5: 9, 6: 10})
df['width'] = df['width'].map(width_map)
# normalize centrality column (between 0 and 10)
df['centrality'] =((df['centrality'] - np.min(df['centrality'])) / (np.max(df['centrality']) - np.min(df['centrality']))) * 10
#Switch to new df for calculation
d_frame = df.copy(deep =True)
# Multiply variables by weights
d_frame['cycleway'] = d_frame['cycleway'] * 0.208074534
d_frame['surface'] = d_frame['surface'] * 0.108695652
d_frame['highway'] = d_frame['highway'] * 0.167701863
d_frame['maxspeed'] = d_frame['maxspeed'] * 0.189440994
d_frame['lanes'] = d_frame['lanes'] * 0.108695652
d_frame['centrality'] = d_frame['centrality'] * 0.071428571
d_frame['width'] = d_frame['width'] * 0.086956522
d_frame['oneway'] = d_frame['oneway'] * 0.059006211
d_frame['index'] = (np.nanmean(d_frame[['cycleway','highway', 'surface', 'maxspeed', 'lanes', 'width', 'oneway',
'centrality']], axis=1,dtype='float64')) * 80
d_frame['grid_index'] = np.average(d_frame['index'],weights=d_frame['length'])
dflist.append(d_frame)
dfs.append(df)
#Final statistics index of city in dictionary
df_indexes = pd.concat(dflist)
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 19 13:51:29 2021
@author: <NAME>
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, \
confusion_matrix, ConfusionMatrixDisplay
df = pd.read_excel('classification_features.xlsx')
"""
This module performs spectral analysis of EEG signals (sometimes
referred to as quantitative EEG) on an mne object. Sleep EEG is ideally suited
to frequency and time-frequency analysis, since different stages or
micro-elements (such as spindles, K-complexes, slow waves) have
specific frequency characteristics [1].
Three spectral analysis methods can be used for the analysis: Fast Fourier
transform, Welch and Multitaper spectrogram. Multitaper estimation tends to
be slightly better at reducing artefactual noise and is thus preferred. For an
in-depth application of Multitaper analysis to EEG signals, please see [2].
This module can also be used to summarise spectral quantities overnight. For
example, absolute delta power can be calculated in each sleep stage. More
experimental metrics, such as spectral entropy of delta activity across the
night [2], are also implemented.
The code below has also been used to analyse event-related changes in EEG.
The following publications have used part of this code [3,4,5,6], and we refer
interested readers to these publications for further details on
implementation.
[1] <NAME>., <NAME>., <NAME>. et al (2021). New and Emerging Approaches
to Better Define Sleep Disruption and Its Consequences.
Frontiers in Neuroscience, 15. doi:10.3389/fnins.2021.751730
[2] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2017). Sleep Neurophysiological Dynamics Through the Lens of
Multitaper Spectral Analysis. Physiology (Bethesda), 32(1),
60-92. doi:10.1152/physiol.00062.2015
[3] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., . . . <NAME>. (2021). A Novel EEG Derived Measure of
Disrupted Delta Wave Activity during Sleep Predicts All-Cause Mortality Risk.
Ann Am Thorac Soc, (in press). doi:10.1513/AnnalsATS.202103-315OC
[4] <NAME>., <NAME>., <NAME>., & <NAME>. (2020).
Correspondence between physiological and behavioural responses
to vibratory stimuli during the sleep onset period: A quantitative
electroencephalography analysis. J Sleep Res, e13232. doi:10.1111/jsr.13232
[5] <NAME>., <NAME>., <NAME>. G. et al. (2021). Polysomnographic
Predictors of Treatment Response to Cognitive Behavioral Therapy for
Insomnia in Participants With Co-morbid Insomnia and Sleep Apnea:
Secondary Analysis of a Randomized Controlled Trial.
Frontiers in Psychology, 12. doi:10.3389/fpsyg.2021.676763
[6] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., EEG power spectral responses to wind farm compared to road
traffic noise during sleep: A laboratory study. (in press) J Sleep Res,
"""
import mne
import os
import numpy as np
import pandas as pd
import warnings
from psga.analyses.utils import check_is_fitted
from psga.analyses.base import BaseMethods
from psga.features.utils import power_spectrum, _psd_params_checker
from psga.hypnogram import _convert_hypno
from psga.features.time_features import compute_maximum_value_epochs, \
compute_ptp_amp, \
compute_rms_value_epochs, compute_std, compute_zero_crossings, \
compute_time_mass, \
compute_hjorth, compute_ratio_energy_time_mass
from psga.features.spectral_features import compute_absol_pow_freq_bands, \
compute_relative_pow_ratios, \
compute_hjorth_spect, compute_spect_entropy, \
compute_spect_slope, compute_spect_edge_freq
from psga.features.denoising_function import moving_average_weighted
import sys
try:
wd = sys._MEIPASS
except AttributeError:
wd = os.path.dirname(__file__)
FREQ_INIT = {'Delta': [0.5,4.5], 'Theta': [4.5, 7.0], 'Alpha': [7,12],
'Sigma': [12,16], 'Beta': [16,35]}
PSD_PARAMS_INIT = {'multitaper':
{'mt_adaptive': True, 'mt_bandwidth': 1,
'mt_low_bias':True},
'welch':{'welch_n_fft':256,
'welch_n_per_seg':None,
'welch_n_overlap':0}}
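# Illustrative sketch (an assumption, not part of the original module): how the
# 'multitaper' defaults above could map onto an MNE call for a single EEG window
# sampled at `sfreq` Hz, assuming the mt_* keys correspond to the matching
# psd_array_multitaper arguments.
def _example_multitaper_psd(window, sfreq):
    from mne.time_frequency import psd_array_multitaper
    psds, freqs = psd_array_multitaper(
        window, sfreq=sfreq, fmin=0.5, fmax=35.0,
        bandwidth=1, adaptive=True, low_bias=True, verbose='error')
    return psds, freqs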
class qEEG(BaseMethods):
"""Performs quantitative EEG analysis on a mne raw object.
Power spectrum analysis is computed on consecutive X ('windows_length') of
raw EEG in the 'score' methods. Mean absolute power of a given frequency
bands can then be calculated overnight and in specific sleep stage. More
experimental metrics on the delta frequency bands [1] are also implemented.
A full list of metrics calculated can be found in XX.
This class can also be used to perform analysis of qEEG relative to a
given events in the score_events methods. Given an event dataframe,
the methods will score qEEG relative to the event onset. For more
information, please see [2,3].
Parameters
----------
windows_length : int
Length of analysis windows. Default to 5 sec.
psd_method : str
PSD methods, 'welch', 'multitaper' or 'fft'
psd_params : dict or None
Optional parameters to be passed to psga.features.utils:power_spectrum. If psd_method = 'welch', psd_params
should contain the following keys (`welch_n_fft`, `welch_n_per_seg`, `welch_n_overlap`). If psd_method
= 'multitaper', it should contain the following ('mt_bandwidth', 'mt_adaptive', 'mt_low_bias'). If None,
default parameters are used.
events_lower_bound : int
Time, in seconds relative to event onset, at which to start the event-related analysis.
events_upper_bound : int
Time, in seconds relative to event onset, at which to stop the event-related analysis.
events_windows_length : int
Length, in seconds, of the analysis windows used around events.
save_results : bool
If true, will save the results at the given "path"
Notes
-----
References
-----
[1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., . . . <NAME>. (2021). A Novel EEG Derived Measure of
Disrupted Delta Wave Activity during Sleep Predicts All-Cause Mortality Risk.
Ann Am Thorac Soc, (in press). doi:10.1513/AnnalsATS.202103-315OC
[2] <NAME>., <NAME>., <NAME>., & <NAME>. (2020).
Correspondence between physiological and behavioural responses to
vibratory stimuli during the sleep onset period: A quantitative
electroencephalography analysis. J Sleep Res, e13232. doi:10.1111/jsr.13232
[3] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., EEG power spectral responses to wind farm compared to road
traffic noise during sleep: A laboratory study. (in press) J Sleep Res,
"""
def __init__(self, windows_length = 5, psd_method = 'multitaper',
events_windows_length = 5, events_lower_bound = -20,
events_upper_bound = 20,
):
self.freq_bands = np.hstack([FREQ_INIT['Delta'][0],
FREQ_INIT['Delta'][1],
FREQ_INIT['Theta'][1],
FREQ_INIT['Alpha'][1],
FREQ_INIT['Sigma'][1],
FREQ_INIT['Beta'][1]])
self.psd_method = psd_method
self.psd_params = PSD_PARAMS_INIT[psd_method]
self.windows_length = windows_length
self.events_windows_length = events_windows_length
self.events_lower_bound = events_lower_bound
self.events_upper_bound = events_upper_bound
self.picks = None
super().__init__()
def set_params(self, parameters_dict, check_has_key=False):
for key, value in parameters_dict.items():
if key == 'psd_params':
value = _psd_params_checker(value,
parameters_dict['psd_method'])
if key == 'freq_bands':
self.freq_bands = value
value = np.hstack([value['Delta'][0], value['Delta'][1],
value['Theta'][1], value['Alpha'][1],
value['Sigma'][1], value['Beta'][1]])
if hasattr(self, key):
setattr(self, key, value)
else:
warnings.warn(key + ' key is not a valid attribute')
def fit(self, raw, hypnogram, picks=None):
self._check_raw(raw)
self._check_hypno(hypnogram)
if picks is not None: self.picks = picks
if self.picks is not None:
raw = raw.pick_channels(ch_names=picks)
else:
raise ValueError('No EEG channel was selected for qEEG analysis.')
self._raw = raw.filter(l_freq=0.3, h_freq=35, verbose='error')
self._hypno = _convert_hypno(hypnogram, self.windows_length)
def score(self):
"""Calculate power spectrum based metrics for each segment of length windows_size
Notes
-----
The following parameters are calculated for each segments and for each EEG channel:
- Absolute and relative power of delta,theta, alpha, sigma, and beta
bands
- 'Delta/alpha ratio, slowing ratio and REM ratio
- Maximum, RMS, SD and peak-to-peak values of EEG epochs data
- zero crossing rate of each EEG epochs
- Spectral entropy and Spectral edges (q =0.85 and 0.95)
"""
check_is_fitted(self, ['_raw', '_hypno'])
hypno = self._hypno
raw = self._raw
for channel in raw.info['ch_names']:
self._scoring[channel] = _score_qEEG(
raw, hypno, channel, tmin=0,
tmax=hypno['duration'].values[0], psd_method=self.psd_method,
psd_params=self.psd_params, freq_bands=self.freq_bands)
return self._scoring, self._epochs_data
def overnight_metrics(self, kdftype='lct2020'):
"""Calculate summary descriptive metrics of an overnight.
Calculate the mean of each metrics calculated in "qEEG.score()" for
individual sleep stages. More experimental metrics on the delta
frequency bands [1] are also implemented. A full list of metrics
calculated can be found in Notes.
Notes
-----
The following parameters are calculated for each segments:
"""
if not self._scoring:
self.score()
scoring = self._scoring.items()
metrics = {}
is_scored = True
for channel, qeeg_dict in scoring:
df = pd.DataFrame.from_dict(qeeg_dict)
import argparse
import os
import mlflow
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from mlflow import log_metric, log_param, get_artifact_uri
from skimage.io import imsave
from sklearn.model_selection import ParameterGrid
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataset import TomoDetectionDataset as Dataset
from dense_yolo import DenseYOLO
from loss import objectness_module, LocalizationLoss
from sampler import TomoBatchSampler
from transform import transforms
def main(args):
torch.backends.cudnn.benchmark = True
device = torch.device("cpu" if not torch.cuda.is_available() else args.device)
loader_train, loader_valid = data_loaders(args)
loaders = {"train": loader_train, "valid": loader_valid}
hparams_dict = {
"block_config": [(1, 3, 2, 6, 4), (2, 6, 4, 12, 8)],
"num_init_features": [8, 16],
"growth_rate": [8, 16],
"bn_size": [2, 4],
}
hparams = list(ParameterGrid(hparams_dict)) # 16 configs
loss_params_dict = [
{"loss": ["CE", "weighted-CE"], "alpha": [0.25, 0.5, 1.0]}, # 6 configs
{"loss": ["focal"], "alpha": [0.25, 0.5, 1.0], "gamma": [0.5, 1.0, 2.0]}, # 9 configs
{
"loss": ["reduced-focal"],
"alpha": [0.25, 0.5, 1.0],
"gamma": [0.5, 1.0, 2.0],
"reduce_th": [0.5],
} # 9 configs
] # 24 configs
loss_params = list(ParameterGrid(loss_params_dict))
loss_params = loss_params * 2 # 48 configs
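# Together with the 16 network configurations above, this yields 48 * 16 = 768
# (loss, architecture) runs in the sweep below.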
try:
mlflow.set_tracking_uri(args.mlruns_path)
experiment_id = (
args.experiment_id
if args.experiment_id
else mlflow.create_experiment(name=args.experiment_name)
)
except Exception as _:
print("experiment-id must be unique")
return
for i, loss_param in tqdm(enumerate(loss_params)):
for j, hparam in enumerate(hparams):
with mlflow.start_run(experiment_id=experiment_id):
mlflow_log_params(loss_param, hparam)
try:
yolo = DenseYOLO(img_channels=1, out_channels=Dataset.out_channels, **hparam)
yolo.to(device)
objectness_loss = objectness_module(
name=loss_param["loss"], args=argparse.Namespace(**loss_param)
)
localization_loss = LocalizationLoss(weight=args.loc_weight)
optimizer = optim.Adam(yolo.parameters(), lr=args.lr)
early_stop = args.patience
run_tpr2 = 0.0
run_tpr1 = 0.0
run_auc = 0.0
for _ in range(args.epochs):
if early_stop == 0:
break
for phase in ["train", "valid"]:
if phase == "train":
yolo.train()
early_stop -= 1
else:
yolo.eval()
df_validation_pred = pd.DataFrame()
valid_target_nb = 0
for data in loaders[phase]:
x, y_true = data
x, y_true = x.to(device), y_true.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == "train"):
y_pred = yolo(x)
obj = objectness_loss(y_pred, y_true)
loc = localization_loss(y_pred, y_true)
total_loss = obj + loc
if phase == "train":
total_loss.backward()
clip_grad_norm_(yolo.parameters(), 0.5)
optimizer.step()
else:
y_true_np = y_true.detach().cpu().numpy()
valid_target_nb += np.sum(y_true_np[:, 0])
df_batch_pred = evaluate_batch(y_pred, y_true)
df_validation_pred = pd.concat(
[df_validation_pred, df_batch_pred], ignore_index=True, sort=False
)
if phase == "valid":
tpr, fps = froc(df_validation_pred, valid_target_nb)
epoch_tpr2 = np.interp(2.0, fps, tpr)
epoch_tpr1 = np.interp(1.0, fps, tpr)
if epoch_tpr2 > run_tpr2:
early_stop = args.patience
run_tpr2 = epoch_tpr2
run_tpr1 = epoch_tpr1
run_auc = np.trapz(tpr, fps)
torch.save(
yolo.state_dict(),
os.path.join(get_artifact_uri(), "yolo.pt"),
)
imsave(
os.path.join(get_artifact_uri(), "froc.png"),
plot_froc(fps, tpr),
)
log_metric("TPR2", run_tpr2)
log_metric("TPR1", run_tpr1)
log_metric("AUC", run_auc)
except Exception as e:
print(
"{:0>2d}/{} | {} {}".format(
j + 1, len(hparams), hparams[j], type(e).__name__
)
)
def mlflow_log_params(loss_param, hparam):
for key in loss_param:
log_param(key, loss_param[key])
log_param("loss_fun", str(loss_param))
for key in hparam:
log_param(key, hparam[key])
log_param("network", str(hparam))
def data_loaders(args):
dataset_train, dataset_valid = datasets(args)
sampler_train = TomoBatchSampler(
batch_size=args.batch_size, data_frame=dataset_train.data_frame
)
def worker_init(worker_id):
np.random.seed(42 + worker_id)
loader_train = DataLoader(
dataset_train,
batch_sampler=sampler_train,
num_workers=args.workers,
worker_init_fn=worker_init,
)
loader_valid = DataLoader(
dataset_valid,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
worker_init_fn=worker_init,
)
return loader_train, loader_valid
def datasets(args):
train = Dataset(
csv_views=args.data_views,
csv_bboxes=args.data_boxes,
root_dir=args.images,
subset="train",
random=True,
only_biopsied=args.only_biopsied,
transform=transforms(train=True),
skip_preprocessing=True,
downscale=args.downscale,
max_slice_offset=args.slice_offset,
seed=args.seed,
)
valid = Dataset(
csv_views=args.data_views,
csv_bboxes=args.data_boxes,
root_dir=args.images,
subset="validation",
random=False,
transform=transforms(train=False),
skip_preprocessing=True,
downscale=args.downscale,
max_slice_offset=args.slice_offset,
seed=args.seed,
)
return train, valid
def froc(df, targets_nb):
total_slices = len(df.drop_duplicates(subset=["PID"]))
total_tps = targets_nb
tpr = [0.0]
fps = [0.0]
max_fps = 4.0
thresholds = sorted(df[df["TP"] == 1]["Score"], reverse=True)
for th in thresholds:
df_th = df[df["Score"] >= th]
df_th_unique_tp = df_th.drop_duplicates(subset=["PID", "TP", "GTID"])
num_tps_th = float(sum(df_th_unique_tp["TP"]))
tpr_th = num_tps_th / total_tps
num_fps_th = float(len(df_th[df_th["TP"] == 0]))
fps_th = num_fps_th / total_slices
if fps_th > max_fps:
tpr.append(tpr[-1])
fps.append(max_fps)
break
tpr.append(tpr_th)
fps.append(fps_th)
if np.max(fps) < max_fps:
tpr.append(tpr[-1])
fps.append(max_fps)
return tpr, fps
def plot_froc(fps, tpr, color="darkorange", linestyle="-"):
fig = plt.figure(figsize=(10, 8))
canvas = FigureCanvasAgg(fig)
plt.plot(fps, tpr, color=color, linestyle=linestyle, lw=2)
plt.xlim([0.0, 4.0])
plt.xticks(np.arange(0.0, 4.5, 0.5))
plt.ylim([0.0, 1.0])
plt.yticks(np.arange(0.0, 1.1, 0.1))
plt.tick_params(axis="both", which="major", labelsize=16)
plt.xlabel("Mean FPs per slice", fontsize=24)
plt.ylabel("Sensitivity", fontsize=24)
plt.grid(color="silver", alpha=0.3, linestyle="--", linewidth=1)
plt.tight_layout()
canvas.draw()
plt.close()
s, (width, height) = canvas.print_to_buffer()
return np.frombuffer(s, np.uint8).reshape((height, width, 4))
def is_tp(pred_box, true_box, min_dist=50):
# box: center point + dimensions
pred_y, pred_x = pred_box["Y"], pred_box["X"]
gt_y, gt_x = true_box["Y"], true_box["X"]
# distance between GT and predicted center points
dist = np.sqrt((pred_x - gt_x) ** 2 + (pred_y - gt_y) ** 2)
# TP radius based on GT box size
dist_threshold = np.sqrt(true_box["Width"] ** 2 + true_box["Height"] ** 2) / 2.
dist_threshold = max(dist_threshold, min_dist)
# TP if predicted center within GT radius
return dist <= dist_threshold
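# Worked example (hypothetical boxes): a prediction at (X=100, Y=100) and a ground
# truth at (X=140, Y=130) with Width=80, Height=60 gives dist = sqrt(40**2 + 30**2) = 50
# and dist_threshold = max(sqrt(80**2 + 60**2) / 2, 50) = 50, so it counts as a TP.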
def evaluate_batch(y_pred, y_true):
y_pred = y_pred.detach().cpu().numpy()
y_true = y_true.detach().cpu().numpy()
df_eval = pd.DataFrame()
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into the fuel groups used in the
# Electric Power Monthly: Coal, Natural Gas, Other Gas, Petroleum, and
# Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
    'UK': 'GBR', # United Kingdom
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and the
columns of each table to be renamed along with the replacement names (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple containing the PUDL glue tables, which link plant and
utility records across the FERC Form 1 and EIA data.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) mapping
partition names (sub-keys) to partitions (sub-values), such as tuples of
years, that can currently be ingested into PUDL for each data source.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the associated tables
from each data source that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing table names (keys) and tuples of integer-type
column names (values) whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor identifiers (keys)
and their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and their
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
        'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
'backup', # WHERE Is this used? because removed from DG table b/c not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_class': pd.StringDtype(),
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_pct': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
'furnished_without_charge_mwh': float,
'generation_activity': pd.BooleanDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'generators_number': float,
'generators_num_less_1_mw': float,
'green_pricing_revenue': float,
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'highest_distribution_voltage_kv': float,
'home_area_network': pd.Int64Dtype(),
'inactive_accounts_included': pd.BooleanDtype(),
'incremental_energy_savings_mwh': float,
'incremental_life_cycle_energy_savings_mwh': float,
'incremental_life_cycle_peak_reduction_mwh': float,
'incremental_peak_reduction_mw': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'load_management_annual_cost': float,
'load_management_annual_actual_peak_reduction_mw': float,
'load_management_annual_effects_mwh': float,
'load_management_annual_incentive_payment': float,
'load_management_annual_potential_peak_reduction_mw': float,
'load_management_incremental_actual_peak_reduction_mw': float,
'load_management_incremental_effects_mwh': float,
'load_management_incremental_potential_peak_reduction_mw': float,
'longitude': float,
'major_program_changes': pd.BooleanDtype(),
'mercury_content_ppm': float,
'merge_address': pd.StringDtype(),
'merge_city': pd.StringDtype(),
'merge_company': pd.StringDtype(),
'merge_date': 'datetime64[ns]',
'merge_state': pd.StringDtype(),
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'momentary_interruption_definition': pd.CategoricalDtype(categories=MOMENTARY_INTERRUPTION_DEF.values()),
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'nerc_regions_of_operation': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'net_power_exchanged_mwh': float,
'net_wheeled_power_mwh': float,
'new_parent': pd.StringDtype(),
'non_amr_ami': pd.Int64Dtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'operates_generating_plant':
|
pd.BooleanDtype()
|
pandas.BooleanDtype
|
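# A minimal sketch (not from the source) of how a column-dtype mapping like the
# one above might be applied; the dict name `column_dtypes` and the sample frame
# below are illustrative assumptions.
import numpy as np
import pandas as pd

column_dtypes = {
    'ferc_cogen_status': pd.BooleanDtype(),
    'mine_id_msha': pd.Int64Dtype(),
    'fuel_type_code': pd.StringDtype(),
    'fuel_cost_per_mmbtu': float,
}
raw = pd.DataFrame({
    'ferc_cogen_status': [True, False, True],
    'mine_id_msha': [101.0, np.nan, 103.0],
    'fuel_type_code': ['BIT', 'NG', 'LIG'],
    'fuel_cost_per_mmbtu': ['2.1', '3.4', '1.9'],
})
# Only cast the columns that are actually present in the frame
typed = raw.astype({c: d for c, d in column_dtypes.items() if c in raw.columns})
print(typed.dtypes)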
import logging
import pandas
import pandas.util.testing as pdt
import numpy as np
import unittest
import analysis as a
class AnalysisTest(unittest.TestCase):
def setUp(self):
pass
def test_prepare_df(self):
df = pandas.DataFrame([(-10, 1, 2, 10),
(0, 1, 4, 12),
(0, 1, 5, 6),
(100, 101, 102, 130),
(100, 101, 102, 90)],
columns=['submit_time_ms', 'start_time_ms',
'mesos_start_time_ms', 'end_time_ms'])
expected_df = pandas.DataFrame([(0, 11, 12, 20, 9, 11),
(10, 11, 14, 22, 11, 1),
(10, 11, 15, 16, 5, 1),
(110, 111, 112, 140, 29, 1),
(110, 111, 112, 140, 29, 1)],
columns=[
'submit_time_ms', 'start_time_ms',
'mesos_start_time_ms', 'end_time_ms',
'run_time_ms', 'overhead'
])
pdt.assert_frame_equal(expected_df, a.prepare_df(df))
def test_running_tasks_at(self):
df = pandas.DataFrame([(0, 11, 12, 20, 9, 11),
(10, 11, 14, 22, 11, 1),
(10, 11, 15, 140, 129, 1),
(110, 111, 112, 140, 29, 1),
(110, 111, 112, 140, 29, 1)],
columns=[
'submit_time_ms', 'start_time_ms',
'mesos_start_time_ms', 'end_time_ms',
'run_time_ms', 'overhead'
])
expected_df = pandas.DataFrame([(0, 11, 12, 20, 9, 11),
(10, 11, 14, 22, 11, 1),
(10, 11, 15, 140, 129, 1),
],
columns=[
'submit_time_ms', 'start_time_ms',
'mesos_start_time_ms', 'end_time_ms',
'run_time_ms', 'overhead'
])
# don't care about index
actual_df = a.running_tasks_at(df, 12).reset_index(drop=True)
pdt.assert_frame_equal(expected_df, actual_df)
expected_df = pandas.DataFrame([(10, 11, 15, 140, 129, 1),
(110, 111, 112, 140, 29, 1),
(110, 111, 112, 140, 29, 1)],
columns=[
'submit_time_ms', 'start_time_ms',
'mesos_start_time_ms', 'end_time_ms',
'run_time_ms', 'overhead'
])
# don't care about index
actual_df = a.running_tasks_at(df, 112).reset_index(drop=True)
pdt.assert_frame_equal(expected_df, actual_df)
def test_time_series_events(self):
# tuple is (time, count, mem, cpus)
events = [(0, 1, 1, 1),
(1, -1, -1, -1),
(2, 1, 10, 10),
(3, 1, 20, 20),
(4, 1, 30, 20),
(5, -1, -50, -50),
(3, 1, 50, 50)
]
rows = [{"time_ms" : 0, "count" : 1, "mem" : 1, "cpus" : 1},
{"time_ms" : 1, "count" : 0, "mem" : 0, "cpus" : 0},
{"time_ms" : 2, "count" : 1, "mem" : 10, "cpus" : 10},
{"time_ms" : 3, "count" : 2, "mem" : 30, "cpus" : 30},
{"time_ms" : 3, "count" : 3, "mem" : 80, "cpus" : 80},
{"time_ms" : 4, "count" : 4, "mem" : 110, "cpus" : 100},
{"time_ms" : 5, "count" : 3, "mem" : 60, "cpus" : 50}
]
expected_df = pandas.DataFrame(rows)
events_df = a.time_series_events(events)
pdt.assert_frame_equal(events_df, expected_df)
def test_get_fair_allocation_one_user(self):
usage_rows = [(0, 'a', 0, 1838.0)]
usage_columns = ['time_ms', 'user','mem','mem_running']
usage_df = pandas.DataFrame(usage_rows,
columns=usage_columns)
fair_df = a.get_fair_allocation(usage_df)
fair_df = fair_df.sort_values('user').reset_index(drop=True)
expected_df = pandas.DataFrame([('a', 1838.0, 0)],
columns=['user', 'mem', 'time_ms'])
pdt.assert_frame_equal(expected_df, fair_df)
def test_get_fair_allocation_simple(self):
usage_rows = [(0, 'a', 100, 250),
(0, 'b', 0, 250),
(0, 'c', 100, 250),
(0, 'd', 1000, 250)]
usage_columns = ['time_ms', 'user','mem','mem_running']
usage_df = pandas.DataFrame(usage_rows,
columns=usage_columns)
fair_df = a.get_fair_allocation(usage_df)
fair_df = fair_df.sort_values('user').reset_index(drop=True)
expected_df = pandas.DataFrame([('a', 250, 0),
('b', 250, 0),
('c', 250, 0),
('d', 250, 0)],
columns=['user', 'mem', 'time_ms'])
pdt.assert_frame_equal(expected_df, fair_df)
def test_get_fair_allocation_complex(self):
usage_rows = [(0, 'a', 100, 250),
(0, 'b', 0, 250),
(0, 'c', 1000, 250),
(0, 'd', 1000, 1250)]
usage_columns = ['time_ms', 'user','mem','mem_running']
usage_df = pandas.DataFrame(usage_rows,
columns=usage_columns)
fair_df = a.get_fair_allocation(usage_df)
fair_df = fair_df.sort_values('user').reset_index(drop=True)
expected_df = pandas.DataFrame([('a', 350.0, 0),
('b', 250.0, 0),
('c', 700.0, 0),
('d', 700.0, 0)],
columns=['user', 'mem', 'time_ms'])
pdt.assert_frame_equal(expected_df, fair_df)
def test_sample_usage(self):
user_event_columns = ['time_ms', 'user', 'mem', 'cpus', 'count']
user_running = pandas.DataFrame([(0, 'a', 10, 1, 1),
(0, 'b', 12, 2, 2),
(1, 'a', 40, 20, 20),
(2, 'a', 20, 2, 2),
(3, 'b', 9, 1, 1),
(5, 'c', 30, 3, 3),
(6, 'c', 30, 3, 3)],
columns=user_event_columns)
user_waiting = pandas.DataFrame([(0, 'a', 10, 1, 1),
(0, 'b', 12, 2, 2),
(2, 'a', 20, 2, 2),
(3, 'b', 90, 10, 10),
(5, 'c', 30, 3, 3)],
columns=user_event_columns)
usage_columns = ['time_ms', 'user', 'mem', 'cpus', 'count',
'mem_running', 'cpus_running', 'count_running']
nan = np.NaN
expected_df = pandas.DataFrame([(0, 'a', 10, 1, 1, 10, 1, 1),
(0, 'b', 12, 2, 2, 12, 2, 2),
(0, 'c', nan, nan, nan, nan, nan, nan),
(5, 'a', 20, 2, 2, 20, 2, 2),
(5, 'b', 90, 10, 10, 9, 1, 1),
(5, 'c', 30, 3, 3, 30, 3, 3)],
columns=usage_columns)
actual_df = a.sample_usage(user_running, user_waiting, 5)
actual_df = actual_df.reset_index(drop=True)
|
pdt.assert_frame_equal(expected_df, actual_df)
|
pandas.util.testing.assert_frame_equal
|
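# Sketches (not the actual `analysis` module, which is not included here) of two
# helpers consistent with the expected frames in the tests above:
# time_series_events sorts the (time, count, mem, cpus) deltas by time and takes
# cumulative sums; running_tasks_at keeps rows whose interval spans the query time.
import pandas

def time_series_events_sketch(events):
    df = pandas.DataFrame(sorted(events), columns=['time_ms', 'count', 'mem', 'cpus'])
    df[['count', 'mem', 'cpus']] = df[['count', 'mem', 'cpus']].cumsum()
    return df

def running_tasks_at_sketch(df, time_ms):
    return df[(df['start_time_ms'] <= time_ms) & (df['end_time_ms'] > time_ms)]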
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 13:32:52 2018
@author: nmei
"""
if __name__ == "__main__":
import os
import pandas as pd
import numpy as np
import utils
from sklearn.model_selection import LeaveOneOut,cross_val_predict
from sklearn.utils import shuffle
from scipy import stats
# define result saving directory
dir_saving = 'results_e3'
if not os.path.exists(dir_saving):
os.mkdir(dir_saving)
try: # the subject-level processing
df1 = pd.read_csv('e3.csv').iloc[:,1:]
except FileNotFoundError: # when testing the script from a different working directory
df1 = pd.read_csv('../../e3.csv').iloc[:,1:]
df = df1.copy()
# select the columns that I need
df = df[['blocks.thisN',
'trials.thisN',
'key_resp_2.keys',
'resp.corr',
'resp_mrating.keys',
'participant',]]
# rename the columns
df.columns = ['blocks',
'trials',
'awareness',
'correctness',
'confidence',
'participant',]
# preallocate the data frame structure
results = dict(sub = [],
model = [],
corre = [],
window = [],
pval = [],
)
results['p(correct|awareness)'] = []
results['p(correct|unawareness)'] = []
results['p(incorrect|awareness)'] = []
results['p(incorrect|unawareness)'] = []
results['p(correct)'] = []
results['p(incorrect)'] = []
results['p(aware)'] = []
results['p(unaware)'] = []
# use success, awareness, and confidence as features
np.random.seed(12345)
# use judgement features
feature_names = [
'correctness',
'awareness',
'confidence',
]
target_name = 'awareness'
experiment = 'e3'
# some of the variables need to be rescaled to a preferable range such as 0-1
name_for_scale = ['awareness']
# ['ab', 'eb', 'er', 'hgh', 'kb', 'kj', 'mp', 'rb', 'vs', 'wp']
# get one of the participants' data
participant = 'kj'
df_sub = df[df['participant'] == participant]
# for 1-back to 4-back
for n_back in np.arange(1,5):
X,y,groups = utils.get_features_targets_groups(
df_sub.dropna(),
n_back = n_back,
names = name_for_scale,
independent_variables = feature_names,
dependent_variable = [target_name,'correctness'])
X,y,groups = shuffle(X,y,groups)
y,correctness = y[:,0],y[:,1]
for model_name,model in utils.make_clfs().items():
cv = LeaveOneOut()
print('{}-back,{}'.format(n_back,model_name))
preds = cross_val_predict(model,X,y,groups=groups,cv=cv,method='predict',verbose=2,n_jobs=4)
df_pred_ = pd.DataFrame(np.vstack([preds,correctness]).T,columns = ['preds','correct'])
p_correct = float(np.sum(correctness == 1)+1) / (len(correctness)+1)
p_incorrect = float(np.sum(correctness == 0)+1) / (len(correctness)+1)
p_aware = float(np.sum(preds == 1)+1) / (len(preds)+1)
p_unaware = float(np.sum(preds == 0)+1) / (len(preds)+1)
p_correct_aware = float(np.sum(np.logical_and(correctness == 1, preds == 1))+1) / (len(df_pred_)+1)
p_correct_unaware = float(np.sum(np.logical_and(correctness == 1, preds == 0))+1) / (len(df_pred_)+1)
p_incorrect_aware = float(np.sum(np.logical_and(correctness == 0, preds == 1))+1) / (len(df_pred_)+1)
p_incorrect_unaware = float(np.sum(np.logical_and(correctness == 0, preds == 0))+1) / (len(df_pred_)+1)
correlation,pval = stats.spearmanr(preds,correctness)
results['sub'].append(participant)
results['model'].append(model_name)
results['corre'].append(correlation)
results['pval'].append(pval)
results['p(correct|awareness)'].append(p_correct_aware/p_aware)
results['p(correct|unawareness)'].append(p_correct_unaware/p_unaware)
results['p(incorrect|awareness)'].append(p_incorrect_aware/p_aware)
results['p(incorrect|unawareness)'].append(p_incorrect_unaware/p_unaware)
results['p(correct)'].append(p_correct)
results['p(incorrect)'].append(p_incorrect)
results['p(aware)'].append(p_aware)
results['p(unaware)'].append(p_unaware)
results['window'].append(n_back)
results_to_save =
|
pd.DataFrame(results)
|
pandas.DataFrame
|
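# A minimal sketch of how the `results` dict built above is typically persisted;
# it assumes the variables from the script above are in scope, and the output
# file name is an assumption rather than part of the source.
import os
import pandas as pd

results_to_save = pd.DataFrame(results)  # one row per (n_back, model) combination
out_name = os.path.join(dir_saving, '{}_{}.csv'.format(participant, experiment))  # hypothetical name
results_to_save.to_csv(out_name, index=False)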
import os
import re
import pandas as pd
import numpy as np
from copy import deepcopy
from xlsxwriter.workbook import Workbook
from xlsxwriter.worksheet import Worksheet
from .theme import Theme
from .cover import Cover
from .gptable import GPTable
from gptables.utils.unpickle_themes import gptheme
class GPWorksheet(Worksheet):
"""
Wrapper for an XlsxWriter Worksheet object. Provides a method for writing
a good practice table (GPTable) to a Worksheet.
"""
def write_cover(self, cover, sheets, auto_width):
"""
Write a cover page to the Worksheet. Uses text from a Cover object and
details of the Workbook contents.
Parameters
----------
cover : gptables.Cover
object containing cover sheet text
sheets : dict
mapping worksheet labels to gptables.GPTable objects
auto_width : bool
select if the width of the first column should be set automatically from the longest sheet label
"""
theme = self.theme
pos = [0, 0]
pos = self._write_element(pos, cover.title, theme.cover_title_format)
pos[0] += 1
if cover.intro is not None:
pos = self._write_element(pos, "Introductory information", theme.cover_subtitle_format)
pos = self._write_element_list(pos, cover.intro, theme.cover_text_format)
pos[0] += 1
if sheets:
pos = self._write_element(pos, "Contents", theme.cover_subtitle_format)
for sheet, gptable in sheets.items():
pos = self._write_hyperlinked_toc_entry(pos, sheet)
title = self._strip_annotation_references(gptable.title)
pos = self._write_element(pos, title, theme.cover_text_format)
if cover.additional_elements is not None:
for element in cover.additional_elements:
content = getattr(gptable, element)
if element in ["subtitles", "notes"]:
content = [self._strip_annotation_references(item) for item in content]
pos = self._write_element_list(pos, content, theme.cover_text_format)
else:
content = self._strip_annotation_references(content)
pos = self._write_element(pos, content, theme.cover_text_format)
pos[1] = 0
pos[0] += 1
if cover.about is not None:
pos = self._write_element(pos, "About these data", theme.cover_subtitle_format)
pos = self._write_element_list(pos, cover.about, theme.cover_text_format)
pos[0] += 1
if cover.contact is not None:
pos = self._write_element(pos, "Contact", theme.cover_subtitle_format)
pos = self._write_element_list(pos, cover.contact, theme.cover_text_format)
pos[0] += 1
if sheets and auto_width:
max_link_len = max([len(key) for key in sheets.keys()])
first_col_width = self._excel_string_width(
max_link_len,
theme.cover_text_format.get("font_size") or 10
)
self._set_column_widths([first_col_width])
def write_gptable(self, gptable, auto_width, disable_footer_parentheses):
"""
Write data from a GPTable object to the worksheet using the workbook
Theme object for formatting.
Parameters
----------
gptable : gptables.GPTable
object containing elements of the gptable to be written to the
Worksheet
auto_width : bool
select if column widths should be determined automatically using length of text in index and columns
disable_footer_parentheses : bool
select if the source, legend, notes and annotation footer elements should not be enclosed in parentheses
Returns
-------
None
"""
if not isinstance(gptable, GPTable):
raise TypeError("`gptable` must be a gptables.GPTable object")
gptable = deepcopy(gptable)
theme = self.theme
# Write each GPTable element using appropriate Theme attr
pos = [0, 0]
self._reference_annotations(gptable)
pos = self._write_element(
pos,
gptable.title,
theme.title_format
)
pos = self._write_element_list(
pos,
gptable.subtitles,
theme.subtitle_format
)
pos = self._write_table_elements(
pos,
gptable,
auto_width
)
if not disable_footer_parentheses:
self._enclose_footer_elements(gptable)
footer = theme.footer_order
for element in footer:
pos = getattr(self, "_write_" + element)(
pos,
getattr(gptable, element),
getattr(theme, element + "_format")
)
@staticmethod
def _strip_annotation_references(text):
"""
Strip annotation references (as $$ $$) from a str or list text element.
"""
pattern = r"\$\$.*?\$\$"
if isinstance(text, str):
no_annotations = re.sub(pattern, "", text)
elif isinstance(text, list):
no_annotations = [
re.sub(pattern, "", part)
if isinstance(part, str) else part
for part in text
]
else:
no_annotations = text  # pass non-text elements (e.g. None) through unchanged
return no_annotations
def _reference_annotations(self, gptable):
"""
Replace note references with numbered references. Acts on `title`,
`subtitles`, `table` and `notes` attributes of a GPTable. References
are numbered from top left of spreadsheet, working across each row.
Parameters
----------
gptable : gptables.GPTable
object containing data with references to notes
Returns
-------
None
"""
elements = [
"title",
"subtitles",
"scope",
"units",
"legend"
]
# Store annotation references in order detected
ordered_refs = []
# Loop through elements, replacing references in strings
for attr in elements:
attr_current = getattr(gptable, attr)
setattr(
gptable,
attr,
self._replace_reference_in_attr(
attr_current,
ordered_refs
)
)
self._reference_table_annotations(gptable, ordered_refs)
new_annotations = {}
# Add to dict in order
for n in range(len(ordered_refs)):
try:
new_annotations.update(
{n + 1: gptable.annotations[ordered_refs[n]]}
)
except KeyError:
msg = (f"`{ordered_refs[n]}` has been referenced, but is not"
" defined in GPTable.annotations")
raise KeyError(msg)
# Warn if all annotations not referenced
annotations_diff = len(gptable.annotations) - len(new_annotations)
if annotations_diff:
output_file = os.path.basename(self._workbook.filename)
msg = (f"Warning: {annotations_diff} annotations have not been"
f" referenced in {output_file}. These annotations are not"
" displayed. Use `notes` for notes without references.")
print(msg)
# Replace old notes refs
gptable.annotations = new_annotations
def _reference_table_annotations(self, gptable, ordered_refs):
"""
Reference annotations in the table column headings and index columns.
"""
table = getattr(gptable, "table")
table.columns = self._replace_reference_in_attr(
[x for x in table.columns],
ordered_refs
)
index_columns = gptable.index_columns.values()
for col in index_columns:
table.iloc[:, col] = table.iloc[:, col].apply(
lambda x: self._replace_reference_in_attr(x, ordered_refs)
)
setattr(gptable, "table", table)
def _replace_reference_in_attr(self, data, ordered_refs):
"""
Replaces references in a string or list/dict of strings. Works
recursively on list elements and dict values. Other types are returned
without modification. Updates `ordered_refs` with newly detected
references.
Parameters
----------
data : any type
object containing strings to replace references in
ordered_refs : list
list of references used so far. New references will be added to
this list in order of detection
Returns
-------
string : str
input string with references replaced with numerical reference (n),
where n is the order of appearance in the resulting document
"""
if isinstance(data, str):
data = self._replace_reference(data, ordered_refs)
if isinstance(data, list):
for n in range(len(data)):
data[n] = self._replace_reference_in_attr(
data[n],
ordered_refs
)
if isinstance(data, dict):
for key in data.keys():
data[key] = self._replace_reference_in_attr(
data[key],
ordered_refs
)
return data
@staticmethod
def _replace_reference(string, ordered_refs):
"""
Given a single string, record occurrences of new references (denoted by
flanking dollar signs [$$reference$$]) and replace with number
reference reflecting order of detection.
Parameters
----------
string : str
the string to replace references within
ordered_refs : list
list of references used so far. New references will be added to
this list in order of detection
Returns
-------
string : str
input string with references replaced with numerical reference (n),
where n is the order of appearance in the resulting document
"""
text_refs = re.findall(r"[$]{2}.*?[$]{2}", string)
dict_refs = [w.replace("$", "") for w in text_refs]
for n in range(len(dict_refs)):
if dict_refs[n] not in ordered_refs:
ordered_refs.append(dict_refs[n])
num_ref = "(" + str(ordered_refs.index(dict_refs[n]) + 1) + ")"
string = string.replace(text_refs[n], num_ref)
return string
def _enclose_footer_elements(self, gptable):
"""
Flank text footer elements with parentheses.
"""
gptable.source = self._enclose_text(gptable.source)
gptable.notes = [self._enclose_text(note) for note in gptable.notes]
gptable.legend = [
self._enclose_text(symbol) for symbol in gptable.legend
]
gptable.annotations = dict(
[("(" + str(k), v + ")") for k, v in gptable.annotations.items()]
)
@staticmethod
def _enclose_text(element):
"""
Enclose text within parentheses. Handles strings and lists
(rich strings).
"""
if isinstance(element, str):
return "(" + element + ")"
elif isinstance(element, list):
return ["("] + element + [")"]
def _write_element(self, pos, element, format_dict):
"""
Write a single text element of a GPTable to the GPWorksheet.
Parameters
----------
element : str or list
the string or list of rich string elements to be written
format_dict : dict
format to be applied to string
pos : list
the position of the worksheet cell to write the element to
Returns
-------
pos : list
new position to write next element from
"""
if element:
self._smart_write(*pos, element, format_dict)
pos[0] += 1
return pos
def _write_element_list(self, pos, element_list, format_dict):
"""
Writes a list of elements row-wise.
Parameters
----------
element_list : list
list of strings or nested list of rich string elements to write,
one per row
format_dict : dict
format to be applied to string
pos : list
the position of the worksheet cell to write the elements to
Returns
-------
pos: list
new position to write next element from
"""
if element_list:
for element in element_list:
pos = self._write_element(pos, element, format_dict)
return pos
def _write_hyperlinked_toc_entry(self, pos, sheet_name):
"""
Write a table of contents entry. Includes a hyperlink to the sheet
in the first column. Then data for that sheet in the second column.
Parameters
----------
pos : list
the position of the worksheet cell to write the elements to
sheet_name : str
name of sheet to hyperlink to
Returns
-------
pos: list
new position to write next element from
"""
theme = self.theme
link = f"internal:'{sheet_name}'!A1"
hyperlink_format = deepcopy(theme.cover_text_format)
hyperlink_format.update({"underline": True, "font_color": "blue"})
self._smart_write(
*pos,
link,
hyperlink_format,
sheet_name
)
return [pos[0], pos[1] + 1]
def _write_source(self, pos, element, format_dict):
"""
Alias for writing footer elements by name.
"""
return self._write_element(pos, element, format_dict)
def _write_legend(self, pos, element_list, format_dict):
"""
Alias for writing footer elements by name.
"""
return self._write_element_list(pos, element_list, format_dict)
def _write_notes(self, pos, element_list, format_dict):
"""
Alias for writing footer elements by name.
"""
return self._write_element_list(pos, element_list, format_dict)
def _write_annotations(self, pos, annotations_dict, format_dict):
"""
Writes a list of ordered annotations row-wise.
Parameters
----------
annotations_dict : dict
note associated with each reference, as {reference: note}
format_dict : dict
format to be applied to string
pos : list
the position of the worksheet cell to write the elements to
Returns
-------
pos: list
new position to write next element from
"""
for ref, annotation in annotations_dict.items():
element = f"{ref}: {annotation}"
pos = self._write_element(pos, element, format_dict)
return pos
def _write_table_elements(self, pos, gptable, auto_width):
"""
Writes the table, scope and units elements of a GPTable. Uses the
Workbook Theme, plus any additional formatting associated with the
GPTable. Also replaces `np.nan` with the missing value marker.
Parameters
----------
gptable : gptables.GPTable
object containing the table and additional formatting data
pos : list
the position of the worksheet cell to write the units to
auto_width : bool
select if column widths should be determined automatically using
length of text in index and columns
Returns
-------
pos : list
new position to write next element from
"""
# Get theme
theme = self.theme
# Write scope
scope = gptable.scope
self._smart_write(
*pos,
scope,
theme.scope_format
)
# Write units above each col heading
pos[1] += gptable.index_levels
n_cols = len(gptable._column_headings)
units = gptable.units
if isinstance(units, str):
pos[1] += n_cols - 1
self._smart_write(
*pos,
units,
theme.units_format
)
pos[1] += 1
elif isinstance(units, list):
for n in range(n_cols):
self._smart_write(
*pos,
units[n],
theme.units_format
)
pos[1] += 1
# Reset position to left col on next row
if (units is not None) or (scope is not None):
pos[0] += 1
pos[1] = 0
## Create data array
index_levels = gptable.index_levels
index_columns = [col for col in gptable.index_columns.values()]
data = pd.DataFrame(gptable.table, copy=True)
# Create row containing column headings
data.loc[-1] = data.columns
data.index = data.index + 1
data.sort_index(inplace=True)
if not gptable.include_index_column_headings:
data.iloc[0, index_columns] = "" # Delete index col headings
## Create formats array
# pandas.DataFrame did NOT want to hold dictionaries, so be wary
formats =
|
pd.DataFrame()
|
pandas.DataFrame
|
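# A standalone sketch of the `$$reference$$` -> numbered-reference replacement
# implemented by GPWorksheet._replace_reference above; the function name
# `demo_replace_reference` is illustrative only.
import re

def demo_replace_reference(string, ordered_refs):
    for text_ref in re.findall(r"[$]{2}.*?[$]{2}", string):
        name = text_ref.replace("$", "")
        if name not in ordered_refs:
            ordered_refs.append(name)
        string = string.replace(text_ref, "(" + str(ordered_refs.index(name) + 1) + ")")
    return string

refs = []
print(demo_replace_reference("Table title$$note1$$", refs))   # Table title(1)
print(demo_replace_reference("A subtitle$$note2$$", refs))    # A subtitle(2)
print(demo_replace_reference("Repeat$$note1$$", refs))        # Repeat(1)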
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 10:39:56 2018
normalization code for data matrix
ver 200319
@author: tadahaya
"""
import pandas as pd
import numpy as np
from scipy.stats import rankdata
import time
def z_array(x):
"""
to calculate z scores
Parameters
----------
x: a numpy array
a numpy array to be analyzed
"""
myu = np.mean(x,axis=0)
sigma = np.std(x,axis=0,ddof=1)
return (x - myu)/sigma
def z_pop(x,axis=0,drop=True):
"""
to calculate z scores from dataframe
the scores employ population control
Parameters
----------
x: a dataframe
a dataframe to be analyzed
axis: 0 or 1
whether z scores are calculated in columns or rows
drop: boolean
whether drop inf and nan
"""
if axis==0:
myu = np.mean(x.values,axis=0)
sigma = np.std(x.values,axis=0,ddof=1)
else:
myu = np.c_[np.mean(x.values,axis=1)]
sigma = np.c_[np.std(x.values,axis=1,ddof=1)]
df = pd.DataFrame((x.values - myu)/sigma)
df.index = x.index
df.columns = x.columns
if drop:
df = df.replace(np.inf,np.nan)
df = df.replace(-np.inf,np.nan)
df = df.dropna()
return df
def z(x,control="",drop=True):
"""
to calculate z scores based on control data from dataframe
Parameters
----------
x: a dataframe
a dataframe to be analyzed
control: string, default ""
indicates the control column name
drop: boolean
whether drop inf and nan
"""
if len(control) > 0:
print("control column name: {0}".format(control))
con = x.loc[:,x.columns.str.contains(control)]
n = len(con.columns)
print("control column No.: {0}".format(n))
if n < 3:
print("<< CAUTION >> control columns are too few: population control was employed")
return z_pop(x,axis=1,drop=drop)
else:
myu = np.c_[np.mean(con.values,axis=1)]
sigma = np.c_[np.std(con.values,axis=1,ddof=1)]
x = x.loc[:,~x.columns.str.contains(control)]
df = pd.DataFrame((x.values - myu)/sigma)
df.index = x.index
df.columns = x.columns
if drop:
df = df.replace(np.inf,np.nan)
df = df.replace(-np.inf,np.nan)
df = df.dropna()
return df
else:
print("<< CAUTION >> no control columns: population control was employed")
return z_pop(x,axis=1,drop=drop)
def madz_array(x):
"""
to calculate MAD Z
Parameters
----------
x: a numpy array
a numpy array to be analyzed
"""
med = np.median(x,axis=0)
mad = np.median(np.abs(x - med),axis=0)
return (x - med)/(1.4826*mad)
def madz_pop(x,axis=0,drop=True):
"""
to calculate MAD Z from dataframe
the scores employ population control
Parameters
----------
x: a dataframe
a dataframe to be analyzed
axis: 0 or 1
whether MAD Z scores are calculated in columns or rows
drop: boolean
whether drop inf and nan
"""
if axis==0:
med = np.median(x.values,axis=0)
mad = np.median(np.abs(x.values - med),axis=0)
else:
med = np.c_[np.median(x.values,axis=1)]
mad = np.c_[np.median(np.abs(x.values - med),axis=1)]
df = pd.DataFrame((x.values - med)/(1.4826*mad))
df.index = x.index
df.columns = x.columns
if drop:
df = df.replace(np.inf,np.nan)
df = df.replace(-np.inf,np.nan)
df = df.dropna()
return df
def madz(x,control="",drop=True):
"""
to calculate MAD Z based on control data from dataframe
Parameters
----------
x: a dataframe
a dataframe to be analyzed
control: string, default ""
indicates the control column name
drop: boolean
whether drop inf and nan
"""
if len(control) > 0:
print("control column name: {0}".format(control))
con = x.loc[:,x.columns.str.contains(control)]
n = len(con.columns)
print("control column No.: {0}".format(n))
if n < 3:
print("<< CAUTION >> control columns are too few: population control was employed")
return madz_pop(x,axis=1,drop=drop)
else:
med = np.c_[np.median(con.values,axis=1)]
mad = np.c_[np.median(np.abs(con.values - med),axis=1)]
x = x.loc[:,~x.columns.str.contains(control)]
df = pd.DataFrame((x.values - med)/(1.4826*mad))
df.index = x.index
df.columns = x.columns
if drop:
df = df.replace(np.inf,np.nan)
df = df.replace(-np.inf,np.nan)
df = df.dropna()
return df
else:
print("<< CAUTION >> no control columns: population control was employed")
return madz_pop(x,axis=1,drop=drop)
def robz_array(x):
"""
to calculate robust z scores
Parameters
----------
x: a numpy array
a numpy array to be analyzed
"""
med = np.median(x,axis=0)
q1,q3 = np.percentile(x,[25,75],axis=0)
niqr = (q3-q1)*0.7413
return (x - med)/niqr
def robz_pop(x,axis=0,drop=True):
"""
to calculate robust z scores from dataframe
the scores employ population control
Parameters
----------
x: a dataframe
a dataframe to be analyzed
axis: 0 or 1
whether robust z scores are calculated in rows or columns
drop: boolean
whether drop inf and nan
"""
if axis==0:
med = np.median(x.values,axis=0)
q1,q3 = np.percentile(x.values,[25,75],axis=0)
else:
med = np.c_[np.median(x.values,axis=1)]
q1 = np.c_[np.percentile(x.values,25,axis=1)]
q3 = np.c_[np.percentile(x.values,75,axis=1)]
niqr = (q3-q1)*0.7413
df = pd.DataFrame((x.values - med)/niqr)
df.index = x.index
df.columns = x.columns
if drop:
df = df.replace(np.inf,np.nan)
df = df.replace(-np.inf,np.nan)
df = df.dropna()
return df
def robz(x,control="",drop=True):
"""
to calculate robust z score based on control data from dataframe
Parameters
----------
x: a dataframe
a dataframe to be analyzed
control: string, default ""
indicates the control column name
drop: boolean
whether drop inf and nan
"""
if len(control) > 0:
print("control column name: {0}".format(control))
con = x.loc[:,x.columns.str.contains(control)]
n = len(con.columns)
print("control column No.: {0}".format(n))
if n < 3:
print("<< CAUTION >> control columns are too few: population control was employed")
return robz_pop(x,axis=1,drop=drop)
else:
con = x.loc[:,x.columns.str.contains(control)]
med = np.c_[np.median(con.values,axis=1)]
q1 = np.c_[np.percentile(con.values,25,axis=1)]
q3 = np.c_[np.percentile(con.values,75,axis=1)]
niqr = (q3-q1)*0.7413
x = x.loc[:,~x.columns.str.contains(control)]
df = pd.DataFrame((x.values - med)/niqr)
df.index = x.index
df.columns = x.columns
if drop:
df = df.replace(np.inf,np.nan)
df = df.replace(-np.inf,np.nan)
df = df.dropna()
return df
else:
print("<< CAUTION >> no control columns: population control was employed")
return robz_pop(x,axis=1,drop=drop)
def quantile(df,method="median"):
"""
quantile normalization of dataframe (variable x sample)
Parameters
----------
df: dataframe
a dataframe subjected to QN
method: str, default "median"
determines whether median or mean values are employed as the template
"""
print("quantile normalization (QN)")
df_c = df.copy() # deep copy
lst_index = list(df_c.index)
lst_col = list(df_c.columns)
n_ind = len(lst_index)
n_col = len(lst_col)
### prepare mean/median distribution
x_sorted = np.sort(df_c.values,axis=0)[::-1]
if method=="median":
temp = np.median(x_sorted,axis=1)
else:
temp = np.mean(x_sorted,axis=1)
temp_sorted = np.sort(temp)[::-1]
### prepare reference rank list
x_rank_T = np.array([rankdata(v,method="ordinal") for v in df_c.T.values])
### conversion
rank = sorted([v + 1 for v in range(n_ind)],reverse=True)
converter = dict(list(zip(rank,temp_sorted)))
converted = []
converted_ap = converted.append
print("remaining conversion count")
for i in range(n_col):
transient = [converter[v] for v in list(x_rank_T[i])]
converted_ap(transient)
rem = n_col - i
print("\r"+str(rem),end="")
np_data = np.matrix(converted).T
df2 = pd.DataFrame(np_data)
df2.index = lst_index
df2.columns = lst_col
print("")
print("use {}".format(method))
return df2
def ts_norm(df,axis=0,ts=False):
"""
normalization with total strength of each sample (columns)
Parameters
----------
df: dataframe
a dataframe subjected to ts normalization
axis: int, default 0
determine direction of normalization, row or column
0: normalization in column vector
1: normalization in row vector
ts: boolean, default False
whether total strength array is exported or not
"""
if axis==0:
norms = np.linalg.norm(df,axis=0)
df2 = df/norms
else:
df = df.T
norms = np.linalg.norm(df,axis=0)
df2 = df/norms
df2 = df2.T
if ts:
return df2,norms
else:
return df2
def consensus_sig(data,sep="_",position=0):
"""
to generate consensus signature
by linear combination weighted by Spearman correlation
Parameters
----------
data: a dataframe
a dataframe to be analyzed
sep: str, default "_"
separator for sample name
position: int, default 0
indicates position of sample name such as drug
"""
print("generate consensus signature (time-consuming)")
start = time.time()
col = list(data.columns)
ind = list(data.index)
samples = list(set([v.split(sep)[position] for v in col]))
samples.sort()
rank = data.rank()
df2 =
|
pd.DataFrame()
|
pandas.DataFrame
|
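# A toy usage sketch for the helpers above, assuming the functions defined in
# this module (z, madz, robz, quantile) are in scope; column names containing
# "control" mark the control samples.
import numpy as np
import pandas as pd

data = pd.DataFrame(
    np.random.randn(100, 6),
    columns=["control_1", "control_2", "control_3", "drugA_1", "drugA_2", "drugB_1"],
)
z_scores = z(data, control="control")        # z scores against the 3 control columns
mad_scores = madz(data, control="control")   # robust variant; 1.4826*MAD approximates sigma under normality
rob_scores = robz(data, control="control")   # robust variant; 0.7413*IQR approximates sigma under normality
qn = quantile(data, method="median")         # quantile normalization to a shared reference distribution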
import pandas as pd
class MovingAverageCalculator:
def __init__(self,df):
self.DataFrame = df
def SMA(self, period=20, column='close'):
df = pd.DataFrame(index=self.DataFrame.index)
df[f"sma_{period}"] = self.DataFrame[column].rolling(window=period,min_periods=1, center=False).mean()
self.DataFrame[f"sma_{period}"] = df[f"sma_{period}"]
return self.DataFrame
def EMA(self, period=20, column='close'):
df =
|
pd.DataFrame(index=self.DataFrame.index)
|
pandas.DataFrame
|
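# A usage sketch for MovingAverageCalculator (assumed to be in scope), plus one
# plausible way the EMA method above could continue; the `.ewm(...)` lines are
# an assumption, not shown in the source.
import pandas as pd

prices = pd.DataFrame({'close': [10.0, 10.5, 10.2, 10.8, 11.0, 11.3]})
calc = MovingAverageCalculator(prices)
with_sma = calc.SMA(period=3)  # adds an 'sma_3' column to the frame
print(with_sma[['close', 'sma_3']])

# Plausible continuation of EMA (assumed):
# df[f"ema_{period}"] = self.DataFrame[column].ewm(span=period, adjust=False).mean()
# self.DataFrame[f"ema_{period}"] = df[f"ema_{period}"]
# return self.DataFrame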
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(
|
Timedelta(241, unit='h')
|
pandas.Timedelta
|
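# A minimal stand-alone sketch (added for illustration, separate from the test
# suite above) of the Timedelta/offset arithmetic that test_ops_offsets checks:
# adding or subtracting a pandas offset to/from a Timedelta yields a Timedelta.
import pandas as pd

td = pd.Timedelta(10, unit='d')                                   # 240 hours
assert td + pd.offsets.Hour(1) == pd.Timedelta(241, unit='h')     # 240h + 1h
assert td - pd.offsets.Hour(1) == pd.Timedelta(239, unit='h')     # 240h - 1h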
import numpy as np
import pickle as pkl
import pandas as pd
import os, nltk, argparse, json
from gensim.models import Word2Vec
from tensorflow.contrib.keras import preprocessing
# Get embed_matrix(np.ndarray), word2index(dict) and index2word(dict). All of them include the extra unknown word "<unk>" and padding word "<pad>", i.e. returned size = param size + 2.
# embedding_model: A pre-trained gensim.models.Word2Vec model.
def build_emb_matrix_and_vocab(embedding_model, keep_in_dict=10000, embedding_size=50):
# 0 th element is the default one for unknown words, and keep_in_dict+1 th element is used as padding.
emb_matrix = np.zeros((keep_in_dict+2, embedding_size))
word2index = {}
index2word = {}
for k in range(1, keep_in_dict+1):
word = embedding_model.wv.index2word[k-1]
word2index[word] = k
index2word[k] = word
emb_matrix[k] = embedding_model[word]
word2index['<unk>'] = 0
index2word[0] = '<unk>'
word2index['<pad>'] = keep_in_dict+1
index2word[keep_in_dict+1] = '<pad>'
return emb_matrix, word2index, index2word
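# Illustrative usage sketch (added for clarity, not part of the original script).
# It assumes the pre-4.0 gensim API that build_emb_matrix_and_vocab itself relies
# on (wv.index2word, model[word]); the real pipeline loads "imdb_embedding" instead
# of training a throw-away model like this.
def _demo_build_emb_matrix():
    toy_sentences = [["good", "movie"], ["bad", "movie"], ["great", "plot"]]
    toy_model = Word2Vec(toy_sentences, size=50, min_count=1)  # gensim < 4.0 signature
    emb, w2i, i2w = build_emb_matrix_and_vocab(toy_model, keep_in_dict=5, embedding_size=50)
    # keep_in_dict + 2 rows: index 0 -> "<unk>", 1..5 -> vocabulary, 6 -> "<pad>"
    assert emb.shape == (7, 50) and w2i["<pad>"] == 6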
# Convert a sentence (list of words) into a list of word indices. All words are lower-cased.
def __sent2index(wordlist, word2index):
wordlist = [word.lower() for word in wordlist]
sent_index = [word2index[word] if word in word2index else 0 for word in wordlist]
return sent_index
# Read data from directory <data_dir>, return a list (text) of list (sent) of list (word index).
def __gen_data_imdbv1(data_dir, word2index, forHAN):
data = []
for filename in os.listdir(data_dir):
file = os.path.join(data_dir, filename)
with open(file) as f:
content = f.readline()
if forHAN:
sent_list = nltk.sent_tokenize(content)
sents_word = [nltk.word_tokenize(sent) for sent in sent_list]
sents_index = [__sent2index(wordlist, word2index) for wordlist in sents_word]
data.append(sents_index)
else:
word_list = nltk.word_tokenize(content)
words_index = __sent2index(word_list, word2index)
data.append(words_index)
return data
# Read data from file <data_file> (one JSON object per line), return a list (text) of list (sent) of list (word index) plus the list of labels.
def __gen_data_scdata(data_file, word2index, forHAN, for_infer=False):
data = []
label = []
with open(data_file, 'r') as f:
lines = f.readlines()
for line in lines:
jsob = json.loads(line)
if not for_infer:
label.append(int(jsob['label']))
content = jsob['text']
if forHAN:
sent_list = nltk.sent_tokenize(content)
sents_word = [nltk.word_tokenize(sent) for sent in sent_list]
sents_index = [__sent2index(wordlist, word2index) for wordlist in sents_word]
data.append(sents_index)
else:
word_list = nltk.word_tokenize(content)
words_index = __sent2index(word_list, word2index)
data.append(words_index)
return data, label
# Pass in indexed dataset, padding and truncating to corresponding length in both text & sent level.
# return data_formatted(after padding&truncating), text_lens(number of sents), text_sent_lens(number of words in each sents inside the text)
def preprocess_text_HAN(data, max_sent_len, max_text_len, keep_in_dict=10000):
text_lens = [] # how many sents in each text
text_sent_lens = [] # a list of list, how many words in each no-padding sent
data_formatted = [] # padded and truncated data
for text in data:
# 1. text_lens
sent_lens = [len(sent) for sent in text]
text_len = len(sent_lens)
text_right_len = min(text_len, max_text_len)
text_lens.append(text_right_len)
# 2. text_sent_lens & data_formatted
sent_right_lens = [min(sent_len, max_sent_len) for sent_len in sent_lens]
text_formatted = preprocessing.sequence.pad_sequences(text, maxlen=max_sent_len, padding="post", truncating="post", value=keep_in_dict+1)
        # Sentence-level padding & truncating are now done; text-level padding and truncating follow below.
lack_text_len = max_text_len - text_len
if lack_text_len > 0:
# padding
sent_right_lens += [0]*lack_text_len
            extra_rows = np.full((lack_text_len, max_sent_len), keep_in_dict+1) # fully padded sentences
text_formatted_right_len = np.append(text_formatted, extra_rows, axis=0)
elif lack_text_len < 0:
# truncating
sent_right_lens = sent_right_lens[:max_text_len]
row_index = [max_text_len+i for i in list(range(0, -lack_text_len))]
text_formatted_right_len = np.delete(text_formatted, row_index, axis=0)
else:
# exactly, nothing to do
text_formatted_right_len = text_formatted
text_sent_lens.append(sent_right_lens)
data_formatted.append(text_formatted_right_len)
return data_formatted, text_lens, text_sent_lens
# Pass in indexed dataset, padding and truncating to corresponding length in sent level.
# return data_formatted(after padding&truncating), sent_lens(number of words inside the sent)
def preprocess_text(data, max_sent_len, keep_in_dict=10000):
# 1. sent_lens
sent_lens = []
for sent in data:
sent_len = len(sent)
sent_right_len = min(sent_len, max_sent_len)
sent_lens.append(sent_right_len)
#2. data_formatted
data_formatted = preprocessing.sequence.pad_sequences(data, maxlen=max_sent_len, padding="post", truncating="post", value=keep_in_dict+1)
#print(type(data_formatted))
data_formatted = list(data_formatted)
return data_formatted, sent_lens
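# Small illustrative sketch (not in the original script) of what preprocess_text
# does: shorter sentences are right-padded with the <pad> index (keep_in_dict+1),
# longer ones are truncated from the end ("post" for both).
def _demo_preprocess_text():
    toy = [[3, 7, 9], [4]]                                  # two indexed "sentences"
    padded, lens = preprocess_text(toy, max_sent_len=2, keep_in_dict=10)
    # -> padded == [array([3, 7]), array([4, 11])], lens == [2, 1]
    assert lens == [2, 1] and list(padded[1]) == [4, 11]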
# do all things above and save.
def imdbv1(working_dir="../data/aclImdb", forHAN=False):
#============================================================
# 1. embedding matrix, word2index table, index2word table
#============================================================
fname = os.path.join(working_dir, "imdb_embedding")
if os.path.isfile(fname):
embedding_model = Word2Vec.load(fname)
else:
print("please run gen_word_embeddings.py first to generate embeddings!")
exit(1)
    print("generate word2index and index2word, get corresponding-sized embedding matrix...")
emb_matrix, word2index, index2word = build_emb_matrix_and_vocab(embedding_model)
#================================================================
# 2. indexed dataset: number/int representation, not string
#================================================================
print("tokenizing and word-index-representing...")
train_dir = os.path.join(working_dir, "train")
train_pos_dir = os.path.join(train_dir, "pos")
train_neg_dir = os.path.join(train_dir, "neg")
test_dir = os.path.join(working_dir, "test")
test_pos_dir = os.path.join(test_dir, "pos")
test_neg_dir = os.path.join(test_dir, "neg")
train_pos_data = __gen_data_imdbv1(train_pos_dir, word2index, forHAN)
train_neg_data = __gen_data_imdbv1(train_neg_dir, word2index, forHAN)
train_data = train_neg_data + train_pos_data
test_pos_data = __gen_data_imdbv1(test_pos_dir, word2index, forHAN)
test_neg_data = __gen_data_imdbv1(test_neg_dir, word2index, forHAN)
test_data = test_neg_data + test_pos_data
#================================
# 3. padding and truncating
#================================
print("padding and truncating...")
if forHAN:
x_train, train_text_lens, train_text_sent_lens = preprocess_text_HAN(train_data, max_sent_length, max_text_length)
x_test, test_text_lens, test_text_sent_lens = preprocess_text_HAN(test_data, max_sent_length, max_text_length)
else:
x_train, train_sent_lens = preprocess_text(train_data, max_sent_length)
x_test, test_sent_lens = preprocess_text(test_data, max_sent_length)
y_train = [0]*len(train_neg_data)+[1]*len(train_pos_data)
y_test = [0]*len(test_neg_data)+[1]*len(test_pos_data)
#===============
# 4. saving
#===============
print("save word embedding matrix...")
emb_filename = os.path.join(working_dir, "emb_matrix")
pkl.dump([emb_matrix, word2index, index2word], open(emb_filename, "wb"))
print("save data for training...")
if forHAN:
df_train = pd.DataFrame({'text':x_train, 'label':y_train, 'text_length':train_text_lens, 'sents_length':train_text_sent_lens})
else:
df_train = pd.DataFrame({'text':x_train, 'label':y_train, 'text_length':train_sent_lens})
train_filename = os.path.join(working_dir, "train_df_file")
df_train.to_pickle(train_filename)
print("save data for testing...")
    if forHAN:
df_test = pd.DataFrame({'text':x_test, 'label':y_test, 'text_length':test_text_lens, 'sents_length':test_text_sent_lens})
else:
df_test =
|
pd.DataFrame({'text':x_test, 'label':y_test, 'text_length':test_sent_lens})
|
pandas.DataFrame
|
import statistics
import json
import csv
from pathlib import Path
from promise.utils import deprecated
from scipy import stats
from .core import should_process, rename_exp
from .core import get_test_fitness
from .core import sort_algorithms
from .core import rename_alg
from .plotting import plot_twinx
import stac
import scipy.stats as ss
import scikit_posthocs as sp
def update_wdl(exp_data, wdltable, rename_map, wdltable_exp_names, exp_name, *,
base_line='WithoutKnowledge', num_generations=50):
"""
    Computes the Win-Draw-Loss statistics of experiment results from the given experiment data.
    The function does not return any value but updates the input arguments 'wdltable' and 'wdltable_exp_names'.
"""
# wins = 0
# draws = 0
# loses = 0
if not base_line:
return
generation = num_generations - 1
bmean = statistics.mean(list(exp_data[base_line][generation].values()))
for alg in exp_data:
if alg == base_line:
continue
renamed_alg = rename_alg(alg, rename_map)
if not renamed_alg in wdltable:
# wins, draws, losses, missing
wdltable[renamed_alg] = [0, 0, 0, 0]
if renamed_alg not in wdltable_exp_names:
# wins, draws, losses, missing
wdltable_exp_names[renamed_alg] = [[], [], [], []]
try:
mean = statistics.mean(list(exp_data[alg][generation].values()))
# base_mean = statistics.mean(list(exp_data[base_line][generation].values()))
except KeyError as e:
print(alg, e)
wdltable[renamed_alg][3] += 1
wdltable_exp_names[renamed_alg][3].append(exp_name)
continue
if len(list(exp_data[base_line][generation].values())) != len(list(exp_data[alg][generation].values())):
            print("Len of ", alg, "(", len(list(exp_data[alg][generation].values())), ") does not match the baseline length.")
wdltable[renamed_alg][3] += 1
wdltable_exp_names[renamed_alg][3].append(exp_name)
# continue
alg_len = len(list(exp_data[alg][generation].values()))
pval_wo_wil = stats.wilcoxon(list(exp_data[base_line][generation].values())[:alg_len],
list(exp_data[alg][generation].values()))[1]
if pval_wo_wil < 0.05:
if mean < bmean:
wdltable[renamed_alg][0] += 1
wdltable_exp_names[renamed_alg][0].append(exp_name)
else:
wdltable[renamed_alg][2] += 1
wdltable_exp_names[renamed_alg][2].append(exp_name)
else:
wdltable[renamed_alg][1] += 1
wdltable_exp_names[renamed_alg][1].append(exp_name)
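# Minimal sketch (added for illustration, mirroring the logic above): a Wilcoxon
# signed-rank test decides "draw" when p >= 0.05; otherwise the comparison of
# means decides win vs. loss, where a lower mean counts as a win because
# update_wdl treats fitness as minimised.
def _classify_vs_baseline(baseline_runs, alg_runs, alpha=0.05):
    p = stats.wilcoxon(baseline_runs, alg_runs)[1]
    if p >= alpha:
        return 'draw'
    return 'win' if statistics.mean(alg_runs) < statistics.mean(baseline_runs) else 'loss'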
def wdl(dirbase, experiments, inclusion_filter, exclusion_filter, rename_map, *, base_line='WithoutKnowledge',
num_generations=50, dump_file=Path('./wdl')):
"""
Computes the Win-Draw-Loss statistics of algorithms compared to a baseline. The function saves
    the stats to JSON and CSV files and also returns them. This function reads the fitness values
from the 'dirbase' location.
Usage: wdl(dirbase, experiments, inclusion_filter, exclusion_filter, dump_file=output_folder / 'wdl', rename_map=rename_map)
"""
wdltable = {}
wdltable_exp_name = {}
for exp in experiments:
print('WDL: processing', dirbase / exp)
exp_data = get_test_fitness(dirbase / exp, inclusion_filter, exclusion_filter, num_generations=num_generations)
update_wdl(exp_data, wdltable, rename_map, wdltable_exp_name, exp, base_line=base_line,
num_generations=num_generations)
with open(str(dump_file) + '.json', 'w') as file:
json.dump(wdltable, file, indent=4)
print('WDL: results saved to:', dump_file)
with open(str(dump_file) + '-expnames.json', 'w') as file:
json.dump(wdltable_exp_name, file, indent=4)
with open(str(dump_file) + '.csv', 'w', newline="") as csv_file:
writer = csv.writer(csv_file)
for key, value in wdltable.items():
writer.writerow([key, *value])
return wdltable, wdltable_exp_name
def wdl2(experiment_data, rename_map, *, base_line='WithoutKnowledge', num_generations=50, dump_file=Path('./wdl')):
"""
Computes the Win-Draw-Loss statistics of algorithms compared to a baseline. The function saves
    the stats to JSON and CSV files and also returns them. The function does not read fitness data
from files and treats 'experiment_data' as a dictionary that contains fitness information for
each experiment.
Usage: wdl2(experiment_data, dump_file=output_folder / 'wdl', rename_map=rename_map)
"""
wdltable = {}
wdltable_exp_name = {}
for exp in experiment_data:
print('WDL2: processing', exp)
exp_data = experiment_data[exp]
update_wdl(exp_data, wdltable, rename_map, wdltable_exp_name, exp, base_line=base_line,
num_generations=num_generations)
with open(str(dump_file) + '.json', 'w') as file:
json.dump(wdltable, file, indent=4)
print('WDL2: results saved to:', dump_file)
with open(str(dump_file) + '-expnames.json', 'w') as file:
json.dump(wdltable_exp_name, file, indent=4)
# print('WDL2: results saved to:', dump_file)
with open(str(dump_file) + '.csv', 'w', newline="") as csv_file:
writer = csv.writer(csv_file)
for key, value in wdltable.items():
writer.writerow([key, *value])
return wdltable, wdltable_exp_name
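# Sketch of the data layout that update_wdl/wdl/wdl2 expect (names are purely
# illustrative): experiment_data[experiment][algorithm][generation] is a dict
# mapping a run id to its test fitness, so .values() yields one fitness per run.
# Real experiments carry many runs (e.g. 30), not the two shown here.
_example_experiment_data = {
    'exp1': {
        'WithoutKnowledge': {49: {'run1': 10.2, 'run2': 11.0}},
        'SomeTransferAlg':  {49: {'run1': 9.8,  'run2': 10.1}},
    }
}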
import pandas as pd
def friedman_test2(test_fitness, ignore_list=[]):
if len(test_fitness) < 3:
return -1, [], []
data = []
alg_names = []
for alg in test_fitness:
if alg in ignore_list:
continue
data.append(list(test_fitness[alg]))
alg_names.append(alg)
_, p, rank, pivot = stac.nonparametric_tests.friedman_test(*data)
post = {}
ctr = 0
for alg in test_fitness:
if alg in ignore_list:
continue
post[alg] = (pivot[ctr])
ctr = ctr + 1
names, z_values, p_values, adjusted_pval = stac.nonparametric_tests.nemenyi_multitest(post)
return p, list(zip(alg_names, rank)), list(zip(names, adjusted_pval)), list(zip(alg_names, z_values)), list(zip(alg_names, p_values))
def summary(dirbase, experiments, inclusion_filter, exclusion_filter, rename_map,
*, num_generations=50, dump_file=Path('./wdl'), baseline_alg):
def summarise(test_fit):
mini = round(min(list(test_fit.values())), 2)
maxi = round(max(list(test_fit.values())), 2)
mean = round(statistics.mean(list(test_fit.values())), 2)
std = round(statistics.stdev(list(test_fit.values())), 2)
median = round(statistics.median(list(test_fit.values())), 2)
return mini, maxi, mean, std, median
def pval(fitness, alg, generation):
if not baseline_alg or alg == baseline_alg:
pval_wo_wil = '--'
pval_wo_t = '--'
else:
if len(list(fitness[baseline_alg][generation].values())) != len(list(fitness[alg][generation].values())):
alg_len = len(list(fitness[alg][generation].values()))
                print("Warning: Len of ", alg, "(", alg_len, ") does not match the baseline length. Test is done for this length.")
try:
pval_wo_wil = stats.wilcoxon(list(fitness[baseline_alg][generation].values())[:alg_len],
list(fitness[alg][generation].values()))[1]
pval_wo_t = stats.ttest_rel(list(fitness[baseline_alg][generation].values())[:alg_len],
list(fitness[alg][generation].values()))[1]
except ValueError:
pval_wo_t = -1
pval_wo_wil = -1
else:
pval_wo_wil = \
stats.wilcoxon(list(fitness[baseline_alg][generation].values()),
list(fitness[alg][generation].values()))[1]
pval_wo_wil = round(pval_wo_wil, 2)
pval_wo_t = \
stats.ttest_rel(list(fitness[baseline_alg][generation].values()),
list(fitness[alg][generation].values()))[
1]
pval_wo_t = round(pval_wo_t, 2)
return pval_wo_wil, pval_wo_t
def friedman_test(fitness, generation):
if len(fitness) < 3:
return -1, [], []
data = []
alg_names = []
for algorithm in fitness:
# if alg == baseline or len(test_fitness[alg][gen].values()) != 30:
if len(fitness[algorithm][generation].values()) != 30:
continue
data.append(list(fitness[algorithm][generation].values()))
alg_names.append(algorithm)
_, p, rank, pivot = stac.nonparametric_tests.friedman_test(*data)
post = {}
ctr = 0
for alg in fitness:
# if alg == baseline or len(test_fitness[alg][gen].values()) != 30:
if len(fitness[alg][generation].values()) != 30:
continue
post[alg] = (pivot[ctr])
ctr = ctr + 1
names, _, _, adjusted_pval = stac.nonparametric_tests.nemenyi_multitest(post)
return p, list(zip(alg_names, rank)), list(zip(names, adjusted_pval))
test_summary_table = {}
best_summary_table = {}
test_data_table = {}
best_data_table = {}
test_fri_table = {}
best_fri_table = {}
gen = 49
def do_summary(fitness, generation):
smmry_table = {}
fri_table = friedman_test(fitness, generation)
        for algo in fitness:
mini, maxi, mean, std, median = summarise(fitness[algo][generation])
pval_wo_wil, pval_wo_t = pval(fitness, algo, generation)
if algo not in smmry_table:
smmry_table[algo] = {}
smmry_table[algo]['Average'] = mean
smmry_table[algo]['Stdev'] = std
smmry_table[algo]['min'] = mini
smmry_table[algo]['max'] = maxi
smmry_table[algo]['median'] = median
smmry_table[algo]['pval_wo_t'] = pval_wo_t
smmry_table[algo]['pval_wo_wil'] = pval_wo_wil
return smmry_table, fri_table
for exp in experiments:
print('Summary: processing', dirbase / exp)
test_fitness, best_fitness = get_test_fitness(dirbase / exp, inclusion_filter, exclusion_filter,
num_generations=num_generations)
test_data_table[exp] = test_fitness
best_data_table[exp] = best_fitness
test_summary_table[exp], test_fri_table[exp] = do_summary(test_fitness, gen)
best_summary_table[exp], best_fri_table[exp] = do_summary(best_fitness, -1)
return test_summary_table, test_data_table, test_fri_table, best_summary_table, best_data_table, best_fri_table
def save_stat2(summary_table, output_folder, rename_map, fried_table):
def calc_wdl(fried, base):
win, draw, loss = {}, {}, {}
for xp in fried:
# if fried[xp][0] >= 0.05:
# d = d + 1
# continue
for comparison in fried[xp][2]:
if base not in comparison[0]:
continue
print(comparison)
pval = comparison[1]
vs = comparison[0].replace(base, '').replace(' vs ', '')
ren_vs = rename_alg(vs, rename_map)
if pval >= 0.05:
draw[ren_vs] = draw.get(ren_vs, 0) + 1
continue
if summary_table[xp][base]['Average'] <= summary_table[xp][vs]['Average']:
win[ren_vs] = win.get(ren_vs, 0) + 1
else:
loss[ren_vs] = loss.get(ren_vs, 0) + 1
return win, draw, loss
def calc_wdl2(fried, alg1, alg2):
"""
Compares alg1 against alg2
:param fried: the friedman table
:param alg1: the baseline algorithm
:param alg2: the algorithm that alg1 is compared against.
:return: (wins, draws, losses) of alg1 against alg2
"""
win, draw, loss = 0, 0, 0
for xp in fried:
# if fried[xp][0] >= 0.05:
# d = d + 1
# continue
for comparison in fried[xp][2]:
if alg1 not in comparison[0]:
continue
if alg2 not in comparison[0]:
continue
pval = comparison[1]
# ren_alg1 = rename_alg(alg1, rename_map)
# ren_alg2 = rename_alg(alg2, rename_map)
if pval >= 0.05:
draw = draw + 1
continue
if summary_table[xp][alg1]['Average'] <= summary_table[xp][alg2]['Average']:
win = win + 1
else:
loss = loss + 1
return win, draw, loss
def fried_on_average(ave_df):
pval, ranks, posthoc, z_values, p_values = friedman_test2(ave_df.T.to_dict('list'))
ranks = {rename_alg(rank[0], rename_map): round(rank[1], 2) for rank in ranks}
z_values = {rename_alg(z[0], rename_map): z[1] for z in z_values}
p_values = {rename_alg(p[0], rename_map): p[1] for p in p_values}
ranks['p-val'] = pval
pd.Series(ranks).to_csv(output_folder / 'mean_table_ranks.csv')
pd.Series(ranks).to_latex(output_folder / 'mean_table_ranks.tex')
|
pd.Series(z_values)
|
pandas.Series
|
import pandas as pd
from datetime import datetime
from pyfinlab import portfolio_optimization as opt
"""
These functions generate formatted Excel files.
"""
def generate_excel_report(
optimized_portfolios, risk_weightings, results, backtest_timeseries, cash_focus, risk_focus, periodic_stats,
label='large_acct'
):
"""
Generates a formatted Excel portfolio optimization analysis.
:param optimized_portfolios: (pd.DataFrame) Cash-weightings for efficient frontier portfolios.
:param risk_weightings: (pd.DataFrame) Risk-weightings for efficient frontier portfolios.
:param results: (pd.DataFrame) Risk, return, and sharpe ratio for all efficient frontier portfolios. Input the
results pd.DataFrame computed using the opt.optimize_portfolio() function.
    :param backtest_timeseries: (pd.DataFrame) Backtest timeseries of portfolio values.
    :param cash_focus: (pd.DataFrame) Cash-weightings weighted by group for efficient frontier portfolios.
    :param risk_focus: (pd.DataFrame) Risk-weightings weighted by group for efficient frontier portfolios.
    :param periodic_stats: (pd.DataFrame) Periodic stats computed using the performance.compile_periodic_stats() function
        from pyfinlab library.
    :param label: (str) Label added to filename as a descriptor.
:return: (obj) Creates Excel workbook objects and saves detailed, formatted results of portfolio optimization.
"""
report_description = 'optimized_portfolios'
today = datetime.today().strftime('%m-%d-%Y')
filename = '../excel/{}_{}_{}.xlsx'.format(report_description, today, label)
cash_portfolios = optimized_portfolios.loc[~(optimized_portfolios.iloc[:, 10:]==0).all(axis=1)]
cash_portfolios.index.name = 'TICKER'
risk_portfolios = risk_weightings.loc[~(risk_weightings.iloc[:, 10:]==0).all(axis=1)]
dashboard = results.append(opt.cash_focus(optimized_portfolios).get('ASSET_CLASS'))
dashboard.index.name = 'Dashboard'
    # Create new Excel file.
writer =
|
pd.ExcelWriter(filename, engine='xlsxwriter')
|
pandas.ExcelWriter
|
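# Stand-alone sketch (illustrative, not from pyfinlab) of the xlsxwriter-backed
# ExcelWriter pattern used above: several DataFrames written to named sheets of
# one workbook; the file is saved when the writer is closed. Assumes the
# xlsxwriter package is installed.
import pandas as pd

def _write_demo_workbook(path='demo_report.xlsx'):
    df_a = pd.DataFrame({'weight': [0.6, 0.4]}, index=['SPY', 'AGG'])
    df_b = pd.DataFrame({'return': [0.07, 0.03]}, index=['SPY', 'AGG'])
    with pd.ExcelWriter(path, engine='xlsxwriter') as writer:
        df_a.to_excel(writer, sheet_name='weights')
        df_b.to_excel(writer, sheet_name='returns')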
from autosklearn.classification import AutoSklearnClassifier
from autosklearn.regression import AutoSklearnRegressor
from autosklearn.metrics import make_scorer
from .classification import get_X_Y_data, split_dataset, get_random_filename
from sklearn import metrics
import pandas as pd
import os
from settings.default import MEDIA_DIR
def auto_classfication(params):
clf = AutoSklearnClassifier()
clf.set_params(**params)
return clf
def auto_regession(params):
clf = AutoSklearnRegressor()
clf.set_params(**params)
return clf
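# Illustrative sketch of how the factory functions above are meant to be used:
# params is a plain dict forwarded to set_params(). The settings shown here
# (time_left_for_this_task, per_run_time_limit) are assumed standard
# auto-sklearn estimator parameters for this sketch.
def _demo_auto_classification():
    params = {'time_left_for_this_task': 120, 'per_run_time_limit': 30}
    clf = auto_classfication(params)  # uses the module's own factory (name kept as-is)
    return clf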
def auto_class_result(label, features, clf, dataset):
X, Y = get_X_Y_data(dataset, label, features)
x_train, x_test, y_train, y_test = split_dataset(X, Y, 0.3)
clf.fit(x_train, y_train)
y_prediciton = clf.predict(x_test)
#socer = make_scorer('f1', metrics.f1_score(y_test, y_prediciton))
result = {}
result['acc'] = metrics.accuracy_score(y_test, y_prediciton)
#result['f1'] = socer
x_test = x_test.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)
result_file = pd.concat([x_test, y_test,
pd.DataFrame(y_prediciton, columns=['Y_pred'])], axis=1)
filename = get_random_filename(20)
result['filename'] = filename
result_file.to_excel(os.path.join(MEDIA_DIR, 'result', result['filename']), index=False)
return result
def auto_reg_result(label, features, clf, dataset):
X, Y = get_X_Y_data(dataset, label, features)
x_train, x_test, y_train, y_test = split_dataset(X, Y, 0.3)
clf.fit(x_train, y_train)
result = {}
y_prediciton = clf.predict(x_test)
x_test = x_test.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)
result_file = pd.concat([x_test, y_test,
|
pd.DataFrame(y_prediciton, columns=['Y_pred'])
|
pandas.DataFrame
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Commons Public API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import datetime
import json
from itertools import product
from . import _auth
import pandas as pd
_PLACES = ('City', 'County', 'State', 'Country', 'Continent')
_CLIENT_ID = ('66054275879-a0nalqfe2p9shlv4jpra5jekfkfnr8ug.apps.googleusercontent.com')
_CLIENT_SECRET = '<KEY>'
_API_ROOT = 'https://datcom-api.appspot.com'
_MICRO_SECONDS = 1000000
_EPOCH_START = datetime.datetime(year=1970, month=1, day=1)
def _year_epoch_micros(year):
"""Get the timestamp of the start of a year in micro seconds.
Args:
year: An integer number of the year.
Returns:
Timestamp of the start of a year in micro seconds.
"""
now = datetime.datetime(year=year, month=1, day=1)
return int((now - _EPOCH_START).total_seconds()) * _MICRO_SECONDS
def _date_epoch_micros(date_string):
"""Get the timestamp of the date string in micro seconds.
Args:
      date_string: A date string in 'YYYY-mm-dd' form.
Returns:
      Timestamp of the date in micro seconds.
"""
now = datetime.datetime.strptime(date_string, '%Y-%m-%d')
return int((now - _EPOCH_START).total_seconds()) * _MICRO_SECONDS
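# Worked example (added for illustration) of the helpers above: 1971-01-01 is
# exactly 365 days after the 1970 epoch, i.e. 365 * 86400 s = 31,536,000 s,
# which is 31,536,000,000,000 micro seconds.
def _epoch_micros_example():
    assert _year_epoch_micros(1971) == 31536000000000
    assert _date_epoch_micros('1971-01-01') == 31536000000000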
class Client(object):
"""Provides Data Commons API."""
def __init__(self,
client_id=_CLIENT_ID,
client_secret=_CLIENT_SECRET,
api_root=_API_ROOT):
self._service = _auth.do_auth(client_id, client_secret, api_root)
response = self._service.get_prop_type(body={}).execute()
self._prop_type = defaultdict(dict)
self._inv_prop_type = defaultdict(dict)
for t in response.get('type_info', []):
self._prop_type[t['node_type']][t['prop_name']] = t['prop_type']
if t['prop_type'] != 'Text':
self._inv_prop_type[t['prop_type']][t['prop_name']] = t['node_type']
self._inited = True
def query(self, datalog_query, max_rows=100):
"""Performs a query returns results as a table.
Args:
datalog_query: string representing datalog query in [TODO(shanth): link]
max_rows: max number of returned rows.
Returns:
      A pandas.DataFrame with the selected variables in the query as the
      column names. If the query returns multiple values for a property then
the result is flattened into multiple rows.
Raises:
RuntimeError: some problem with executing query (hint in the string)
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
try:
response = self._service.query(body={
'query': datalog_query,
'options': {
'row_count_limit': max_rows
}
}).execute()
except Exception as e: # pylint: disable=broad-except
raise RuntimeError('Failed to execute query: %s' % e)
header = response.get('header', [])
rows = response.get('rows', [])
result_dict = {header: [] for header in header}
for row in rows:
cells = row.get('cells', [])
if len(cells) != len(header):
raise RuntimeError(
'Response #cells mismatches #header: {}'.format(response))
cell_values = []
for key, cell in zip(header, cells):
if not cell:
cell_values.append([''])
else:
try:
cell_values.append(cell['value'])
except KeyError:
raise RuntimeError('No value in cell: {}'.format(row))
# Iterate through the cartesian product to flatten the query results.
for values in product(*cell_values):
for idx, key in enumerate(header):
result_dict[key].append(values[idx])
return pd.DataFrame(result_dict)[header]
def expand(self,
pd_table,
arc_name,
seed_col_name,
new_col_name,
outgoing=True,
max_rows=100):
"""Create a new column with values for the given property.
The existing pandas dataframe should include a column containing entity IDs
for a certain schema.org type. This function populates a new column with
property values for the entities and adds additional rows if a property has
repeated values.
Args:
pd_table: Pandas dataframe that contains entity information.
arc_name: The property to add to the table.
seed_col_name: The column name that contains entity (ids) that the added
properties belong to.
new_col_name: New column name.
outgoing: Set this flag if the property points away from the entities
denoted by the seed column.
max_rows: The maximum number of rows returned by the query results.
Returns:
A pandas.DataFrame with the additional column and rows added.
Raises:
ValueError: when input argument is not valid.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute query'
if seed_col_name not in pd_table:
raise ValueError('%s is not a valid seed column name' % seed_col_name)
if new_col_name in pd_table:
raise ValueError(
'%s is already a column name in the data frame' % new_col_name)
seed_col = pd_table[seed_col_name]
seed_col_type = seed_col[0]
assert seed_col_type != 'Text', 'Parent entity should not be Text'
# Determine the new column type
if outgoing:
if arc_name not in self._prop_type[seed_col_type]:
raise ValueError(
'%s does not have outgoing property %s' % (seed_col_type, arc_name))
new_col_type = self._prop_type[seed_col_type][arc_name]
else:
if arc_name not in self._inv_prop_type[seed_col_type]:
raise ValueError(
'%s does not have incoming property %s' % (seed_col_type, arc_name))
new_col_type = self._inv_prop_type[seed_col_type][arc_name]
dcids = ' '.join(seed_col[1:]).strip()
if not dcids:
# All entries in the seed column are empty strings. The new column should
# contain no entries.
pd_table[new_col_name] = ""
pd_table[new_col_name][0] = new_col_type
return pd_table
seed_col_var = seed_col_name.replace(' ', '_')
new_col_var = new_col_name.replace(' ', '_')
if outgoing:
query = ('SELECT ?{seed_col_var} ?{new_col_var},'
'typeOf ?node {seed_col_type},'
'dcid ?node {dcids},'
'dcid ?node ?{seed_col_var},'
'{arc_name} ?node ?{new_col_var}').format(
arc_name=arc_name,
seed_col_var=seed_col_var,
seed_col_type=seed_col_type,
new_col_var=new_col_var,
dcids=dcids)
else:
query = ('SELECT ?{seed_col_var} ?{new_col_var},'
'typeOf ?node {seed_col_type},'
'dcid ?node {dcids},'
'dcid ?node ?{seed_col_var},'
'{arc_name} ?{new_col_var} ?node').format(
arc_name=arc_name,
seed_col_var=seed_col_var,
seed_col_type=seed_col_type,
new_col_var=new_col_var,
dcids=dcids)
# Run the query and merge the results.
return self._query_and_merge(
pd_table,
query,
seed_col_name,
new_col_name,
seed_col_var,
new_col_var,
new_col_type,
max_rows=max_rows)
# ----------------------- OBSERVATION QUERY FUNCTIONS -----------------------
def get_instances(self, col_name, instance_type, max_rows=100):
"""Get a list of instance dcids for a given type.
Args:
col_name: Column name for the returned column.
instance_type: String of the instance type.
      max_rows: Max number of returned rows.
Returns:
A pandas.DataFrame with instance dcids.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
query = ('SELECT ?{col_name},'
'typeOf ?node {instance_type},'
'dcid ?node ?{col_name}').format(
col_name=col_name, instance_type=instance_type)
type_row = pd.DataFrame(data=[{col_name: instance_type}])
try:
dcid_column = self.query(query, max_rows)
except RuntimeError as e:
raise RuntimeError('Execute query\n%s\ngot an error:\n%s' % (query, e))
return pd.concat([type_row, dcid_column], ignore_index=True)
def get_populations(self,
pd_table,
seed_col_name,
new_col_name,
population_type,
max_rows=100,
**kwargs):
"""Create a new column with population dcid.
The existing pandas dataframe should include a column containing entity IDs
for geo entities. This function populates a new column with
population dcid corresponding to the geo entity.
Args:
pd_table: Pandas dataframe that contains geo entity dcids.
seed_col_name: The column name that contains entity (ids) that the added
properties belong to.
new_col_name: New column name.
population_type: Population type like "Person".
max_rows: The maximum number of rows returned by the query results.
**kwargs: keyword properties to define the population.
Returns:
A pandas.DataFrame with an additional column added.
Raises:
ValueError: when input argument is not valid.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute query'
if seed_col_name not in pd_table:
raise ValueError('%s is not a valid seed column name' % seed_col_name)
if new_col_name in pd_table:
raise ValueError(
'%s is already a column name in the data frame' % new_col_name)
seed_col = pd_table[seed_col_name]
seed_col_type = seed_col[0]
assert seed_col_type != 'Text', 'Parent entity should not be Text'
# Create the datalog query for the requested observations
dcids = ' '.join(seed_col[1:]).strip()
if not dcids:
pd_table[new_col_name] = ""
pd_table[new_col_name][0] = 'Population'
return pd_table
seed_col_var = seed_col_name.replace(' ', '_')
new_col_var = new_col_name.replace(' ', '_')
query = ('SELECT ?{seed_col_var} ?{new_col_var},'
'typeOf ?node {seed_col_type},'
'typeOf ?pop Population,'
'dcid ?node {dcids},'
'dcid ?node ?{seed_col_var},'
'location ?pop ?node,'
'dcid ?pop ?{new_col_var},'
'populationType ?pop {population_type},').format(
new_col_var=new_col_var,
seed_col_var=seed_col_var,
seed_col_type=seed_col_type,
dcids=dcids,
population_type=population_type)
pv_pairs = sorted(kwargs.items())
idx = 0
for idx, pv in enumerate(pv_pairs, 1):
query += 'p{} ?pop {},'.format(idx, pv[0])
query += 'v{} ?pop {},'.format(idx, pv[1])
query += 'numConstraints ?pop {}'.format(idx)
# Run the query and merge the results.
return self._query_and_merge(
pd_table,
query,
seed_col_name,
new_col_name,
seed_col_var,
new_col_var,
'Population',
max_rows=max_rows)
def get_observations(self,
pd_table,
seed_col_name,
new_col_name,
start_date,
end_date,
measured_property,
stats_type,
max_rows=100):
"""Create a new column with values for an observation of the given property.
The existing pandas dataframe should include a column containing entity IDs
for a certain schema.org type. This function populates a new column with
property values for the entities.
Args:
pd_table: Pandas dataframe that contains entity information.
seed_col_name: The column that contains the population dcid.
new_col_name: New column name.
      start_date: The start date of the observation (in 'YYYY-mm-dd' form).
      end_date: The end date of the observation (in 'YYYY-mm-dd' form).
measured_property: observation measured property.
stats_type: Statistical type like "Median"
max_rows: The maximum number of rows returned by the query results.
Returns:
A pandas.DataFrame with an additional column added.
Raises:
ValueError: when input argument is not valid.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute query'
if seed_col_name not in pd_table:
raise ValueError('%s is not a valid seed column name' % seed_col_name)
if new_col_name in pd_table:
raise ValueError(
'%s is already a column name in the data frame' % new_col_name)
seed_col = pd_table[seed_col_name]
seed_col_type = seed_col[0]
        assert seed_col_type == 'Population' or seed_col_type == 'City', (
            'Parent entity should be Population or City')
# Create the datalog query for the requested observations
dcids = ' '.join(seed_col[1:]).strip()
if not dcids:
pd_table[new_col_name] = ""
pd_table[new_col_name][0] = 'Observation'
return pd_table
seed_col_var = seed_col_name.replace(' ', '_')
new_col_var = new_col_name.replace(' ', '_')
query = ('SELECT ?{seed_col_var} ?{new_col_var},'
'typeOf ?pop {seed_col_type},'
'typeOf ?o Observation,'
'dcid ?pop {dcids},'
'dcid ?pop ?{seed_col_var},'
'observedNode ?o ?pop,'
'startTime ?o {start_time},'
'endTime ?o {end_time},'
'measuredProperty ?o {measured_property},'
'{stats_type}Value ?o ?{new_col_var},').format(
seed_col_type=seed_col_type,
new_col_var=new_col_var,
seed_col_var=seed_col_var,
dcids=dcids,
measured_property=measured_property,
stats_type=stats_type,
start_time=_date_epoch_micros(start_date),
end_time=_date_epoch_micros(end_date))
# Run the query and merge the results.
return self._query_and_merge(
pd_table,
query,
seed_col_name,
new_col_name,
seed_col_var,
new_col_var,
'Observation',
max_rows=max_rows)
# -------------------------- CACHING FUNCTIONS --------------------------
def read_dataframe(self, file_name):
"""Read a previously saved pandas dataframe.
User can only read previously saved data file with the same authentication
email.
Args:
file_name: The saved file name.
Returns:
A pandas dataframe.
Raises:
RuntimeError: when failed to read the dataframe.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
try:
response = self._service.read_dataframe(file_name=file_name).execute()
except Exception as e: # pylint: disable=broad-except
raise RuntimeError('Failed to read dataframe: {}'.format(e))
return pd.read_json(json.loads(response['data']), dtype=False)
def save_dataframe(self, pd_dataframe, file_name):
"""Saves pandas dataframe for later retrieving.
    Each authentication email has its own scope for saved dataframes. Writing
    with the same file_name overwrites a previously saved dataframe.
Args:
pd_dataframe: A pandas.DataFrame.
file_name: The saved file name.
Raises:
RuntimeError: when failed to save the dataframe.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
data = json.dumps(pd_dataframe.to_json())
try:
response = self._service.save_dataframe(body={
'data': data,
'file_name': file_name
}).execute()
except Exception as e: # pylint: disable=broad-except
raise RuntimeError('Failed to save dataframe: {}'.format(e))
return response['file_name']
# -------------------------- OTHER QUERY FUNCTIONS --------------------------
def get_cities(self, state, new_col_name, max_rows=100):
"""Get a list of city dcids in a given state.
Args:
state: Name of the state name.
new_col_name: Column name for the returned city column.
      max_rows: Max number of returned rows.
Returns:
A pandas.DataFrame with city dcids.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
query = ('SELECT ?{new_col_name},'
'typeOf ?node City,'
'dcid ?node ?{new_col_name},'
'containedInPlace ?node ?county,'
'containedInPlace ?county ?state,'
'name ?state "{state}"').format(
new_col_name=new_col_name, state=state)
type_row =
|
pd.DataFrame(data=[{new_col_name: 'City'}])
|
pandas.DataFrame
|
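# Side note (illustrative sketch): the cartesian-product flattening used in
# Client.query above turns a response row whose cells hold multiple values into
# one output row per combination of values.
from itertools import product

def _demo_flatten():
    cell_values = [['CA'], ['Los Angeles', 'San Diego']]   # one multi-valued cell
    return list(product(*cell_values))
    # -> [('CA', 'Los Angeles'), ('CA', 'San Diego')] : two flattened rows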
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from Bio.SeqUtils.ProtParam import ProteinAnalysis
import numpy as np
import os
from datetime import datetime
def create_sequence_properties_dataframe(sequences):
    print("---- Creating properties for all the data. This may take a few mins depending on data size ----")
params = ['sequence', 'aa_counts', 'aa_percentages', 'molecular_weight', 'aromaticity', 'instability_index',
'isoelectric_point', 'sec_struc', 'helix', 'turn', 'sheet', 'epsilon_prot', 'with_reduced_cysteines',
'with_disulfid_bridges', 'gravy', 'flexibility','net_charge_at_pH7point4', 'length']
seq_properties =
|
pd.DataFrame(columns=params)
|
pandas.DataFrame
|
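# Illustrative sketch of the Biopython calls that typically fill the columns
# declared above (the method names are the standard ProtParam API, but treat
# the exact set as an assumption and check your Biopython version):
from Bio.SeqUtils.ProtParam import ProteinAnalysis

def _demo_protein_properties(seq="MKTAYIAKQR"):
    pa = ProteinAnalysis(seq)
    return {
        'molecular_weight': pa.molecular_weight(),
        'aromaticity': pa.aromaticity(),
        'instability_index': pa.instability_index(),
        'isoelectric_point': pa.isoelectric_point(),
        'sec_struc': pa.secondary_structure_fraction(),  # (helix, turn, sheet)
        'gravy': pa.gravy(),
        'length': len(seq),
    }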
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.difference, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected =
|
MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
|
pandas.core.index.MultiIndex.from_tuples
|
#!/usr/bin/env python
#This is a script that calculates collocates for a specific keyword by computing how often each word in a text corpus co-occurs with the keyword within a given window. It also calculates mutual information (MI), a measure of the strength of association between the keyword and each collocate, and saves the results as a single file containing the keyword, collocate, raw frequency, and MI.
"""
Calculate collocates for a specific keyword in a text corpus
Parameters:
path: str <path-to-folder>
keyword: str <keyword-in-lowercase>
windowsize: int <window-size-in-words>
Usage:
Assignment2_collocation.py -p <path-to-folder> -k <keyword> -w <window-size>
Example:
$ python Assignment2_collocation.py -p data/100_english_novels/corpus -k <keyword> -w <window-size>
"""
# IMPORT LIBRARIES #
import os
import sys
sys.path.append(os.path.join("..")) # enabling communication with home directory
import pandas as pd
from pathlib import Path
import csv
import re
import string
import numpy as np
import argparse
# DEFINE TOKENIZE() FUNTION #
# First we need a tokenize() function that takes an input string, splits it into tokens (individual words), and returns them as a list
def tokenize(input_string):
# Use the compile() function from the re module to build a pattern that matches any run of characters other than letters and apostrophes; these runs act as the split points between tokens
tokenizer = re.compile(r"[^a-zA-Z']+")
# Create a list of tokens (words) by splitting the input string at the compiling pattern defined above
token_list = tokenizer.split(input_string)
# Return the list of tokens (individual words)
return token_list
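# Illustrative behaviour of tokenize() (the example string is an assumption, not taken from
# the assignment): splitting on runs of non-letter, non-apostrophe characters gives
#   tokenize("It's a fine day!")  ->  ["It's", "a", "fine", "day", ""]
# Note that trailing punctuation yields a trailing empty token, which later code may want to drop.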
# DEFINE MAIN FUNCTION #
# Now we need a main() function that reads a corpus path, a keyword, and a window size from the command line, then counts how often each word in the corpus collocates with the keyword within that window. It then calculates the mutual information (MI) between the keyword and each collocate (a measure of the strength of their association) and builds a dataframe with four columns: keyword, collocate, raw_frequency, and MI.
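# For reference, one common formulation of the MI score in collocation analysis (an assumption
# here, since the exact computation is not shown in this excerpt) is:
#   MI = log2( (O11 * N) / (R1 * C1) )
# where O11 is the number of times the collocate occurs inside the window around the keyword,
# N is the total number of tokens in the corpus, R1 is the corpus frequency of the keyword,
# and C1 is the corpus frequency of the collocate.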
def main():
# First I want to define the arguments that the function requires in order to be run from the command line
# I do this using the argparse module
# Define function arguments
ap = argparse.ArgumentParser()
# Argument 1: the first argument is the path to the corpus directory
ap.add_argument("-p", "--path", required = True, help= "Path to directory of text corpus")
# Argument 2: the second argument is the keyword
ap.add_argument("-k", "--keyword", required = True, help= "Key/target word in lowercase letters")
# Argument 3: the third argument is the window size
ap.add_argument("-w", "--windowsize", required = True, help= "Window size in number of words")
# Create a variable containing the argument parameters defined above
args = vars(ap.parse_args())
# Define path to text corpus
input_path = args["path"]
# Define keyword
keyword = args["keyword"]
# Define window size
window_size = int(args["windowsize"])
# Now we can move on to the actual function
# Create empty list for all tokens across the corpus
token_list_all = []
#Create empty list of all collocates
collocates_list = []
# Create empty dataframe with the four columns
data =
|
pd.DataFrame(columns=["keyword", "collocate", "raw_frequency", "MI"])
|
pandas.DataFrame
|
#aggregation script
from distributed import wait
import pandas as pd
import geopandas as gpd
from panoptes_client import Panoptes
from shapely.geometry import box, Point
import json
import numpy as np
import os
from datetime import datetime
import utils
import extract
import start_cluster
def download_data(everglades_watch, min_version, generate=False):
#see https://panoptes-python-client.readthedocs.io/en/v1.1/panoptes_client.html#module-panoptes_client.classification
classification_export = everglades_watch.get_export('classifications', generate=generate)
rows = []
for row in classification_export.csv_dictreader():
rows.append(row)
df = pd.DataFrame(rows)
df["workflow_version"] = df.workflow_version.astype(float)
df = df[df.workflow_version > min_version]
df = df[df.workflow_name =="Counts and Behavior"]
return df
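# Hypothetical usage sketch (credentials, project slug, and the min_version value are
# placeholders; only the function signature above is taken from this script):
#   from panoptes_client import Panoptes, Project
#   Panoptes.connect(username="...", password="...")
#   everglades_watch = Project.find(slug="...")
#   raw_classifications = download_data(everglades_watch, min_version=300.0)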
def download_subject_data(everglades_watch, savedir, generate=False):
#see https://panoptes-python-client.readthedocs.io/en/v1.1/panoptes_client.html#module-panoptes_client.classification
classification_export = everglades_watch.get_export('subjects', generate=generate)
rows = []
for row in classification_export.csv_dictreader():
rows.append(row)
df = pd.DataFrame(rows)
fname = "{}/{}.csv".format(savedir,"everglades-watch-subjects")
#Overwrite subject set
df.to_csv(fname)
return df
def load_classifications(classifications_file, min_version):
"""Load classifications from Zooniverse
classifications_file: path to .csv
"""
df = pd.read_csv(classifications_file)
df = df[df.workflow_version > min_version]
df = df[df.workflow_name =="Counts and Behavior"]
return df
def parse_additional_response(x):
annotation_dict = json.loads(x)[0]
response = annotation_dict["value"]
return response
def parse_front_screen(x):
#Extract and parse json
annotation_dict = json.loads(x)[0]
boxes = annotation_dict["value"]
if len(boxes) == 0:
return pd.DataFrame({"species":[None],"x":[None],"y":[None],"additional_observations":[None]})
boxes = pd.DataFrame(boxes)
boxes = boxes.rename(columns = {"tool_label": "label"})
#Loop through each box and create a dataframe
box_df = pd.DataFrame()
for index, box in boxes.iterrows():
box_df = box_df.append(box,ignore_index=True)
#Split label into Species and Behavior
new_columns = box_df.label.str.split("-",n=1,expand=True)
box_df["species"] = new_columns[0]
box_df["behavior"] = new_columns[1]
return box_df[["label","species","behavior","x","y"]]
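# Illustrative example (the label text is an assumption): a front-screen box labelled
# "Great Egret-Flying" is split on the first "-" into species "Great Egret" and behavior
# "Flying"; a label containing no "-" leaves the behavior column empty.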
def parse_uncommon_labels(x):
boxes = pd.DataFrame(x)
#This needs to be done carefully, as Zooniverse only returns the ordinal sublabel position
sublabels= {0:"Flying",1:"Courting",2:"Roosting/Nesting",3:"Unknown"}
#Loop through each box and create a dataframe
box_df = pd.DataFrame()
for index, box in boxes.iterrows():
#we used to allow multiples
value = box.details[0]["value"]
if type(value) is list:
value = value[0]
#If the box used the "Other" tool, the value is a write-in species; otherwise it is the ordinal index of a behavior sublabel
if box.tool_label == "Other":
box["WriteInSpecies"] = value
box["behavior"] = None
else:
box["behavior"] = sublabels[value]
box_df = box_df.append(box,ignore_index=True)
box_df = box_df.rename(columns = {"tool_label": "species"})
box_df = box_df[["species","behavior","x","y"]]
return box_df
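# Illustrative example based on the code above: a box whose details value is 2 is mapped
# through the sublabels dict to behavior "Roosting/Nesting", while a box drawn with the
# "Other" tool keeps species "Other", stores the typed-in value in WriteInSpecies (a field
# not retained in the four columns returned), and gets behavior None.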
def parse_additional_observations(x):
"""Parse the optional second screen of less common labels"""
uncommon_annotation_dict = json.loads(x)[2]
results = [ ]
if len(uncommon_annotation_dict["value"]) > 0:
results.append(parse_uncommon_labels(uncommon_annotation_dict["value"]))
#combine results into a single dataframe
results =
|
pd.concat(results)
|
pandas.concat
|