prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90) |
---|---|---|
import sys
import pandas as pd
import spacy
import numpy as np
import math
import argparse
from argparse import ArgumentParser
nlp = spacy.load('en_core_web_lg')
parser = ArgumentParser()
parser.add_argument("--irecf", type=str, default='IREC Result of LDA model')
parser.add_argument("--walmidf", type=str, required=True, help='WALMID Result of LDA model')
args = parser.parse_args()
irecf = args.irecf
walmidf = args.walmidf
# irec
# learn embeddings of keywords
irec = pd.read_csv(irecf)
irecid_keywords_dict = {}
for cid, keywords in zip(irec['cluster_id'], irec['keyword'].tolist()):
    cid = int('1'+str(cid))
    if cid not in irecid_keywords_dict:
        irecid_keywords_dict[cid] = keywords
irec_id_vec = {}
for cid, cluster in irecid_keywords_dict.items():
clusters_vecs = []
cluster = ' '.join(cluster.split(', '))
for word in nlp(cluster):
clusters_vecs.append(word.vector)
cluster_vec = np.mean(np.array(clusters_vecs), axis=0)
irec_id_vec[cid] = cluster_vec
# count the number of companies
irec_rids_dict = {}
ireccid_rids_dict = {}
for cid, rid in zip(irec['cluster_id'], irec['rid']):
cid = int('1'+str(cid))
if cid not in ireccid_rids_dict.keys():
ireccid_rids_dict[cid] = [rid]
else:
if rid not in ireccid_rids_dict[cid]:
ireccid_rids_dict[cid].append(rid)
if rid not in irec_rids_dict.keys():
irec_rids_dict[rid] = ''
ireccid_rids_sorted = {k: v for k, v in sorted(ireccid_rids_dict.items(), key=lambda item: item[0])}
ireccid_ridnum_dict = {}
for cid, rids in ireccid_rids_sorted.items():
ireccid_ridnum_dict[cid] = len(rids)
# compute regional quotient
print('Number of rid in irec:', len(irec_rids_dict))
ireccid_rq_dict = {}
cid_rq_dict = {}
for cid, ridnum in ireccid_ridnum_dict.items():
ireccid_rq_dict[cid] = ridnum/len(irec_rids_dict)
cid_rq_dict[cid] = ridnum/len(irec_rids_dict)
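# Toy illustration (numbers invented) of the regional quotient computed above: each
# cluster's quotient is the share of all distinct rids that fall in that cluster.
_toy_cluster_rids = {1003: ['r7', 'r12', 'r30'], 1005: ['r7']}
_toy_total_rids = 50
_toy_rq = {cid: len(rids) / _toy_total_rids for cid, rids in _toy_cluster_rids.items()}
# _toy_rq == {1003: 0.06, 1005: 0.02}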
# walmid
# learn embeddings of keywords
walmid = | pd.read_csv(walmidf) | pandas.read_csv |
import shutil, os, re
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
def getdata1(rawdata):
    '''Extract the Id vs. Vg@Vd=5V test data; this block does not need interpolation for now.'''
start = (rawdata['c'] == ' Id vs. Vg@Vd=5V').idxmax()
end = (rawdata['b'] == ' Initial Id vs. Vg@Vd=0.1V').idxmax()
# rawdata.iloc[start:end]
selected = rawdata.iloc[start:end]
selected1 = selected.drop('a',axis=1)
selected2 = | DataFrame(selected1.values) | pandas.DataFrame |
# %%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score, recall_score, confusion_matrix, classification_report
import subprocess
import joblib
# Get multiple outputs in the same cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Ignore all warnings
import warnings
warnings.filterwarnings('ignore')
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# %%
# Reading the dataset
dc = | pd.read_csv("Churn_Modelling.csv") | pandas.read_csv |
import pandas as pd
from io import StringIO
from sklearn.impute import SimpleImputer
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from sequential_feature_selection import SBS
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
pd.set_option('display.max_columns', None)
| pd.set_option('display.width', None) | pandas.set_option |
# Data : https://archive.ics.uci.edu/ml/datasets/Online+News+Popularity
import json
import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
if __name__ == "__main__":
n = 10
cols = [
"url",
"timedelta",
"n_tokens_title",
"n_tokens_content",
"n_unique_tokens",
"n_non_stop_words",
"n_non_stop_unique_tokens",
"num_hrefs",
"num_self_hrefs",
"num_imgs",
"num_videos",
"average_token_length",
"num_keywords",
"data_channel_is_lifestyle",
"data_channel_is_entertainment",
"data_channel_is_bus",
"data_channel_is_socmed",
"data_channel_is_tech",
"data_channel_is_world",
"kw_min_min",
"kw_max_min",
"kw_avg_min",
"kw_min_max",
"kw_max_max",
"kw_avg_max",
"kw_min_avg",
"kw_max_avg",
"kw_avg_avg",
"self_reference_min_shares",
"self_reference_max_shares",
"self_reference_avg_sharess",
"weekday_is_monday",
"weekday_is_tuesday",
"weekday_is_wednesday",
"weekday_is_thursday",
"weekday_is_friday",
"weekday_is_saturday",
"weekday_is_sunday",
"is_weekend",
"LDA_00",
"LDA_01",
"LDA_02",
"LDA_03",
"LDA_04",
"global_subjectivity",
"global_sentiment_polarity",
"global_rate_positive_words",
"global_rate_negative_words",
"rate_positive_words",
"rate_negative_words",
"avg_positive_polarity",
"min_positive_polarity",
"max_positive_polarity",
"avg_negative_polarity",
"min_negative_polarity",
"max_negative_polarity",
"title_subjectivity",
"title_sentiment_polarity",
"abs_title_subjectivity",
"abs_title_sentiment_polarity",
"shares",
]
data = pd.read_csv("../data/news/OnlineNewsPopularity.csv", names=cols, skiprows=1)
data["shares"] = np.log(data["shares"] + 1)
train, test = train_test_split(data, test_size=0.2, random_state=1337)
targets = ["shares"]
num_cols = [
"n_tokens_title",
"n_tokens_content",
"n_unique_tokens",
"n_non_stop_words",
"n_non_stop_unique_tokens",
"num_hrefs",
"num_self_hrefs",
"num_imgs",
"num_videos",
"average_token_length",
"num_keywords",
"data_channel_is_lifestyle",
"data_channel_is_entertainment",
"data_channel_is_bus",
"data_channel_is_socmed",
"data_channel_is_tech",
"data_channel_is_world",
"kw_min_min",
"kw_max_min",
"kw_avg_min",
"kw_min_max",
"kw_max_max",
"kw_avg_max",
"kw_min_avg",
"kw_max_avg",
"kw_avg_avg",
"self_reference_min_shares",
"self_reference_max_shares",
"self_reference_avg_sharess",
"weekday_is_monday",
"weekday_is_tuesday",
"weekday_is_wednesday",
"weekday_is_thursday",
"weekday_is_friday",
"weekday_is_saturday",
"weekday_is_sunday",
"is_weekend",
"LDA_00",
"LDA_01",
"LDA_02",
"LDA_03",
"LDA_04",
"global_subjectivity",
"global_sentiment_polarity",
"global_rate_positive_words",
"global_rate_negative_words",
"rate_positive_words",
"rate_negative_words",
"avg_positive_polarity",
"min_positive_polarity",
"max_positive_polarity",
"avg_negative_polarity",
"min_negative_polarity",
"max_negative_polarity",
"title_subjectivity",
"title_sentiment_polarity",
"abs_title_subjectivity",
"abs_title_sentiment_polarity",
]
cat_cols = []
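    # standardize with train-split statistics only, so no test information leaks into the scaling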
for k in num_cols:
mean = train[k].mean()
std = train[k].std()
train[k] = (train[k] - mean) / std
test[k] = (test[k] - mean) / std
train = train.sample(frac=1)
for feature in cat_cols:
train[feature] = | pd.Series(train[feature], dtype="category") | pandas.Series |
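    # The dataset row stops here; lightgbm and mean_absolute_error are imported above but
    # never reached in the visible prompt. A plausible continuation (assumed, not part of
    # the original script) would train a regressor on the standardized features:
    model = lgb.LGBMRegressor(n_estimators=200, random_state=1337)
    model.fit(train[num_cols], train[targets[0]])
    preds = model.predict(test[num_cols])
    print("MAE on log(shares + 1):", mean_absolute_error(test[targets[0]], preds))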
'''
Module : make_supertranscript_ref
Description : Make reference supertranscript for cryptic variants
Copyright : (c) <NAME>, Sep 2018
License : MIT
Maintainer : github.com/mcmero
Portability : POSIX
Take VCF, contig_info and reference files, and make a supertranscript
for each contig including any novel bits inserted into the ref sequence
'''
import math
import numpy as np
import pandas as pd
import re
import sys
import logging
import os
import tempfile
import pickle
import st_helper as sh
import bedtool_helper
from pybedtools import BedTool
from Bio import SeqIO
from argparse import ArgumentParser
from utils import cached, init_logging, exit_with_error
pd.set_option("mode.chained_assignment", None)
EXIT_FILE_IO_ERROR = 1
# headers for GTF file
GTF_COLS = ['chr', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute']
BED_EXT_COLS = ['chr', 'start', 'end', 'name', 'score', 'strand', 'thickStart', 'thickEnd', 'itemRgb']
# only these variant types require modification to reference supertranscripts
VARS_TO_ANNOTATE = ['EE','NE','INS','RI','UN','FUS','DEL']
# regex masks
STRAND = r'\(([+-])\)'
# keep track of canonical genes written to avoid duplicate entries
canonical_genes_written = []
def parse_args():
'''
Parse command line arguments.
Returns Options object with command line argument values as attributes.
Will exit the program on a command line error.
'''
description = 'Make supertranscript reference'
parser = ArgumentParser(description=description)
parser.add_argument('--log',
metavar='LOG_FILE',
type=str,
help='record program progress in LOG_FILE')
parser.add_argument(dest='contig_info',
metavar='CONTIG_INFO',
type=str,
help='''Contig information for novel contigs.''')
parser.add_argument(dest='contig_vcf',
metavar='CONTIG_VCF',
type=str,
help='''Novel variants in VCF format.''')
parser.add_argument(dest='gtf_file',
metavar='GTF_FILE',
type=str,
help='''GTF annotation file containing transcript annotations.''')
parser.add_argument(dest='fasta',
metavar='FASTA',
type=str,
help='''Genome reference in fasta format.''')
parser.add_argument(dest='outdir',
metavar='OUTDIR',
type=str,
help='''Output directory.''')
parser.add_argument(dest='sample',
metavar='SAMPLE',
type=str,
help='''Sample name. Used to name bed and supertranscript output files.''')
return parser.parse_args()
#=====================================================================================================
# Utility functions
#=====================================================================================================
def reverse_complement(seq):
lookup = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'}
if seq == '':
return ''
if type(seq) == float and math.isnan(seq):
return ''
seq = seq[::-1]
seq = ''.join([lookup[base.upper()] for base in list(seq)])
return(seq)
def get_gene(attribute):
'''
extract gene name from a single GTF attribute string
'''
try:
return attribute['gene_name']
except KeyError:
return ''
def get_contig_genes(con_info):
'''
return gene1 and gene2 (in case of fusion),
indicating genes overlapping the given contig
'''
fus_genes = con_info[con_info.overlapping_genes.str.contains(':')]
if len(fus_genes) > 0:
fus_genes = np.unique(fus_genes.overlapping_genes)
fus_genes = [fg.split(':') for fg in fus_genes][0]
assert len(fus_genes) <= 2
return fus_genes[0], fus_genes[1]
else:
genes = np.unique(con_info.overlapping_genes.values)
if len(genes) > 1:
logging.info('WARNING: multiple overlapping genes found for contig %s' % con_info.contig_id.values[0])
return genes[0], ''
def get_contig_strand(con_info, variant):
'''
return contig alignment strand given the variant ID
'''
strand = '.'
if variant in con_info.variant_id.values:
var_info = con_info[con_info.variant_id == variant]
strand = re.search(STRAND, var_info.pos1.values[0]).group(1)
if variant in con_info.partner_id.values:
var_info = con_info[con_info.partner_id == variant]
strand = re.search(STRAND, var_info.pos2.values[0]).group(1)
return strand
def get_gene_strands(gtf, genes):
strands = []
for gene in genes:
gene = gene.split('|')[0] # take first gene as representative in case of overlaps
strand = gtf[gtf.gene == gene].strand.values
strand = strand[0] if gene != '' and len(strand) > 0 else ''
strands.append(strand)
return strands
def get_strand_info(con_info, gstrands):
if 'FUS' in con_info.variant_type.values:
con_fus = con_info[con_info.variant_type == 'FUS']
cs1 = get_contig_strand(con_fus, con_fus.variant_id.values[0])
cs2 = get_contig_strand(con_fus, con_fus.partner_id.values[0])
# if one fragment aligns on a strand counter to gene orientation
# while the other aligns matching gene orientation, return strands
# corresponding to contig alignment
gs1, gs2 = gstrands
if (cs1 != gs1 and cs2 == gs2) or (cs1 == gs1 and cs2 != gs2):
return [cs1, cs2]
else:
return gstrands
else:
return gstrands
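# Quick sanity checks for the small helpers above (inputs are illustrative only):
# reverse_complement reverses the sequence and swaps each base for its complement,
# and STRAND pulls the +/- orientation out of a "chrom:pos(strand)" style string.
assert reverse_complement('ACCGT') == 'ACGGT'
assert re.search(STRAND, 'chr7:140453136(-)').group(1) == '-'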
#=====================================================================================================
# Read/write functions
#=====================================================================================================
def get_output_files(sample, outdir):
genome_bed = '%s/%s_genome.bed' % (outdir, sample)
st_block_bed = '%s/%s_blocks_supertranscript.bed' % (outdir, sample)
st_gene_bed = '%s/%s_genes_supertranscript.bed' % (outdir, sample)
st_fasta = '%s/%s_supertranscript.fasta' % (outdir, sample)
return genome_bed, st_block_bed, st_gene_bed, st_fasta
@cached('gene_gtf.pickle')
def load_gtf_file(gtf_file):
'''
load in reference GTF file containing gene/exon info
remove 'chr' prefix if present and extract gene names
'''
logging.info('Processing GTF reference file...')
gtf = BedTool(gtf_file).remove_invalid().saveas()
gene_names = [get_gene(row.attrs) for row in gtf]
with tempfile.NamedTemporaryFile(mode='r+') as temp_gtf:
gtf.saveas(temp_gtf.name)
gtf_pd = pd.read_csv(temp_gtf, header=None, sep='\t', names=GTF_COLS, comment='#', low_memory=False)
gtf_pd['gene'] = gene_names
# no non-standard chroms will be handled
# TODO: is there some way to properly handle alt contigs?
alt_chrs = gtf_pd['chr'].str.contains('Un|alt|unknown|random|K')
gtf_pd = gtf_pd[np.invert(alt_chrs.values)]
# extract gene name from gtf_pd and remove 'chr' prefix if present
gtf_pd_chrs = gtf_pd['chr'].str.contains('chr')
if any(gtf_pd_chrs.values):
gtf_pd['chr'] = gtf_pd['chr'].apply(lambda a: a.split('chr')[1])
gtf_pd.loc[gtf_pd['chr'] == 'M', 'chr'] = 'MT'
# create gene features if none exist
if len(gtf_pd[gtf_pd.feature == 'gene']) == 0:
aggregator = {'start': lambda x: min(x),
'end': lambda x: max(x)}
gene_gtf_pd = gtf_pd.groupby(['chr', 'score', 'strand', 'frame', 'gene'],
as_index=False, sort=False).agg(aggregator)
gene_gtf_pd['source'] = 'ALL'
gene_gtf_pd['attribute'] = ''
gene_gtf_pd['feature'] = 'gene'
gene_gtf_pd = gene_gtf_pd[GTF_COLS + ['gene']]
gtf_pd = gtf_pd.append(gene_gtf_pd)
return gtf_pd
def load_vcf_file(contig_vcf):
'''
load in VCF file containing novel contig variants
remove 'chr' prefix from chroms if present
'''
cvcf = | pd.read_csv(contig_vcf, sep='\t', header=None, comment='#', low_memory=False) | pandas.read_csv |
import numpy as np
import pandas as pd
from unittest import TestCase
from dataverk.exceptions.dataverk_exceptions import EnvironmentVariableNotSet
from dataverk.utils import anonymization
df_in = pd.DataFrame(data={'col1': [1, 2, 3, 4, 5, 6], 'col2': [33, 44, 55, 67, 765, 1111]})
df_values = pd.DataFrame(data={'values': ['one', 'two', 'three', 'four', 'five'],
'ints': [1, 2, 3, 4, 5],
'floats': [1.0, 2.0, 3.0, 4.0, 5.0]})
class MethodsReturnValues(TestCase):
def test_replace_w_defaults(self):
df_in_anonymizable_cols = pd.DataFrame(data={'col1': [1, 2, 3, 4, 5, 6], 'col2': [3, 4, 5, 6, 7, 8]})
expected_df_out = pd.DataFrame(data={'col1': ["*", "*", "*", 4, 5, 6], 'col2': [3, 4, 5, 6, 7, 8]})
df_out_all_defaults = anonymization.anonymize_replace(df_in_anonymizable_cols, eval_column='col1')
self.assertTrue(df_out_all_defaults.equals(expected_df_out))
def test_replace_based_on_lower_limit(self):
expected_df_out = pd.DataFrame(data={'col1': ["*", "*", "*", "*", 5, 6],
'col2': ["*", "*", "*", "*", 765, 1111]})
df_out = anonymization.anonymize_replace(df_in, eval_column='col1', anonymize_columns=['col2'],
evaluator=lambda x: x < 5)
self.assertTrue(df_out.equals(expected_df_out))
def test_replace_based_on_single_label(self):
exp_df_out_str = pd.DataFrame(data={'values': ['one', '*', 'three', 'four', 'five'],
'ints': [1, 2, 3, 4, 5],
'floats': [1.0, 2.0, 3.0, 4.0, 5.0]})
df_out_str = anonymization.anonymize_replace(df_values, eval_column='values', evaluator=lambda x: x == 'two')
self.assertTrue(df_out_str.equals(exp_df_out_str))
exp_df_out_int = pd.DataFrame(data={'values': ['one', 'two', 'three', 'four', 'five'],
'ints': [1, "*", 3, 4, 5],
'floats': [1.0, 2.0, 3.0, 4.0, 5.0]})
df_out_int = anonymization.anonymize_replace(df_values, eval_column='ints', evaluator=lambda x: x == 2)
self.assertTrue(df_out_int.equals(exp_df_out_int))
def test_replace_based_on_label_NoneType(self):
df_none = pd.DataFrame({'floats': [None, 1.0], 'ints': [1, 1]})
expected_df_out = pd.DataFrame({'floats': [0.0, 1.0], 'ints': [1, 1]})
df_out_none = anonymization.anonymize_replace(df_none, eval_column='floats', evaluator=lambda x: np.isnan(x),
replace_by=0)
self.assertTrue(df_out_none.equals(expected_df_out))
def test_replace_based_on_label_list(self):
exp_df_out_str = pd.DataFrame(data={'values': ['one', '*', 'three', 'four', 'five'],
'ints': [1, 2, 3, 4, 5],
'floats': [1.0, 2.0, 3.0, 4.0, 5.0]})
df_out_str = anonymization.anonymize_replace(df_values, eval_column='values', evaluator=lambda x: x in ['two'])
self.assertTrue(df_out_str.equals(exp_df_out_str))
exp_df_out_str = pd.DataFrame(data={'values': ['one', '*', 'three', '*', 'five'],
'ints': [1, 2, 3, 4, 5],
'floats': [1.0, 2.0, 3.0, 4.0, 5.0]})
df_out_str = anonymization.anonymize_replace(df_values, eval_column='values',
evaluator=lambda x: x in ['two', 'four'])
self.assertTrue(df_out_str.equals(exp_df_out_str))
exp_df_out_num = pd.DataFrame(data={'values': ['one', 'two', 'three', 'four', 'five'],
'ints': [1, "*", 3, "*", 5],
'floats': [1.0, 2.0, 3.0, 4.0, 5.0]})
df_out_num = anonymization.anonymize_replace(df_values, eval_column='ints', evaluator=lambda x: x in [2, 4])
self.assertTrue(df_out_num.equals(exp_df_out_num))
exp_df_out_num = pd.DataFrame(data={'values': ['one', 'two', 'three', 'four', 'five'],
'ints': [1, 2, 3, 4, 5],
'floats': [1.0, "*", 3.0, "*", 5.0]})
df_out_num = anonymization.anonymize_replace(df_values, eval_column='floats', evaluator=lambda x: x in [2, 4])
self.assertTrue(df_out_num.equals(exp_df_out_num))
df_obj = pd.DataFrame({'labels': ['one', 1, 1.0, 33], 'ints': [1, 2, 3, 4]})
df_expected_out = pd.DataFrame({'labels': ['*', '*', '*', 33], 'ints': ['*', '*', '*', 4]})
df_out = anonymization.anonymize_replace(df_obj, eval_column='labels', anonymize_columns=['ints'],
evaluator=lambda x: x in ['one', 1])
self.assertTrue(df_out.equals(df_expected_out))
def test_not_anonymize_eval_col(self):
expected_df_out = pd.DataFrame(data={'values': ['one', 'two', 'three', 'four', 'five'],
'ints': ['*', 2, '*', 4, '*'],
'floats': [1.0, 2.0, 3.0, 4.0, 5.0]})
df_out = anonymization.anonymize_replace(df_values, eval_column='values', anonymize_columns=['ints'],
evaluator=lambda x: x in ['one', 'three', 'five'],
anonymize_eval=False)
self.assertTrue(df_out.equals(expected_df_out))
df_out = anonymization.anonymize_replace(df_values, eval_column='values', anonymize_columns=['values', 'ints'],
evaluator=lambda x: x in ['one', 'three', 'five'],
anonymize_eval=False)
self.assertTrue(df_out.equals(expected_df_out))
def test_no_change_in_df_in(self):
df_in_ = df_in.copy()
anonymization.anonymize_replace(df_in, eval_column='col1')
self.assertTrue(df_in.equals(df_in_))
anonymization.anonymize_replace(df_in, eval_column='col1', evaluator=lambda x: x in [1, 3])
self.assertTrue(df_in.equals(df_in_))
def test_anonymize_column_str_name(self):
exp_df_out = pd.DataFrame(data={'values': ['one', 'two', 'three', 'four', 'five'],
'ints': ["*", "*", "*", 4, 5],
'floats': ["*", "*", "*", 4.0, 5.0]})
df_out = anonymization.anonymize_replace(df_values, eval_column='ints', anonymize_columns='floats')
self.assertTrue(df_out.equals(exp_df_out))
df_out = anonymization.anonymize_replace(df_values, eval_column='ints', anonymize_columns='floats',
evaluator=lambda x: x in [1, 2, 3])
self.assertTrue(df_out.equals(exp_df_out))
def test_anonymize_column_num_name(self):
df_ = pd.DataFrame(data={0: [1, 2, 4], 1: [2, 3, 5]})
exp_df_out_ = pd.DataFrame(data={0: [1, 2, 4], 1: ['*', '*', 5]})
df_out_ = anonymization.anonymize_replace(df_, eval_column=1)
self.assertTrue(df_out_.equals(exp_df_out_))
def test_replace_by_single_value(self):
for r_val in [None, 1.5, 5, 'n/a']:
expected_df_out = pd.DataFrame(data={'col1': [r_val, r_val, r_val, 4, 5, 6],
'col2': [r_val, r_val, r_val, 67, 765, 1111]})
df_out = anonymization.anonymize_replace(df_in, eval_column='col1', anonymize_columns=['col2'],
replace_by=r_val)
self.assertTrue(df_out.equals(expected_df_out))
def test_replace_by_list(self):
exp_df_out = | pd.DataFrame(data={'col1': [0, 0, 0, 4, 5, 6], 'col2': [None, None, None, 67, 765, 1111]}) | pandas.DataFrame |
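# The dataverk anonymization module under test is not included in this dump. As a rough
# sketch (not the library's actual implementation), the behaviour the tests above exercise
# looks roughly like the function below; the default evaluator "< 4" and marker "*" are
# inferred from test_replace_w_defaults, and the list-valued replace_by case from the
# last test is omitted for brevity.
def anonymize_replace_sketch(df, eval_column, anonymize_columns=None,
                             evaluator=lambda x: x < 4, replace_by='*',
                             anonymize_eval=True):
    out = df.copy()                           # the input frame must stay untouched
    mask = out[eval_column].apply(evaluator)  # rows whose eval value triggers anonymization
    if anonymize_columns is None:
        cols = []
    elif isinstance(anonymize_columns, (list, tuple)):
        cols = list(anonymize_columns)
    else:
        cols = [anonymize_columns]            # a single column label (str or numeric)
    if anonymize_eval and eval_column not in cols:
        cols.append(eval_column)
    if not anonymize_eval and eval_column in cols:
        cols.remove(eval_column)
    for col in cols:
        out[col] = out[col].where(~mask, other=replace_by)
    return out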
import re
import numpy as np
import pytest
from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series
import pandas._testing as tm
from pandas.core.arrays.categorical import recode_for_categories
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalAPI:
def test_ordered_api(self):
# GH 9347
cat1 = Categorical(list("acb"), ordered=False)
tm.assert_index_equal(cat1.categories, Index(["a", "b", "c"]))
assert not cat1.ordered
cat2 = Categorical(list("acb"), categories=list("bca"), ordered=False)
tm.assert_index_equal(cat2.categories, Index(["b", "c", "a"]))
assert not cat2.ordered
cat3 = Categorical(list("acb"), ordered=True)
tm.assert_index_equal(cat3.categories, Index(["a", "b", "c"]))
assert cat3.ordered
cat4 = Categorical(list("acb"), categories=list("bca"), ordered=True)
tm.assert_index_equal(cat4.categories, Index(["b", "c", "a"]))
assert cat4.ordered
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
assert not cat2.ordered
cat2 = cat.as_ordered()
assert cat2.ordered
cat2.as_unordered(inplace=True)
assert not cat2.ordered
cat2.as_ordered(inplace=True)
assert cat2.ordered
assert cat2.set_ordered(True).ordered
assert not cat2.set_ordered(False).ordered
cat2.set_ordered(True, inplace=True)
assert cat2.ordered
cat2.set_ordered(False, inplace=True)
assert not cat2.ordered
# removed in 0.19.0
msg = "can't set attribute"
with pytest.raises(AttributeError, match=msg):
cat.ordered = True
with pytest.raises(AttributeError, match=msg):
cat.ordered = False
def test_rename_categories(self):
cat = Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
tm.assert_numpy_array_equal(
res.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(res.categories, Index([1, 2, 3]))
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
exp_cat = Index(["a", "b", "c"])
tm.assert_index_equal(cat.categories, exp_cat)
# GH18862 (let rename_categories take callables)
result = cat.rename_categories(lambda x: x.upper())
expected = Categorical(["A", "B", "C", "A"])
tm.assert_categorical_equal(result, expected)
# and now inplace
res = cat.rename_categories([1, 2, 3], inplace=True)
assert res is None
tm.assert_numpy_array_equal(
cat.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(cat.categories, Index([1, 2, 3]))
@pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]])
def test_rename_categories_wrong_length_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"])
msg = (
"new categories need to have the same number of items as the "
"old categories!"
)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(new_categories)
def test_rename_categories_series(self):
# https://github.com/pandas-dev/pandas/issues/17981
c = Categorical(["a", "b"])
result = c.rename_categories(Series([0, 1], index=["a", "b"]))
expected = Categorical([0, 1])
tm.assert_categorical_equal(result, expected)
def test_rename_categories_dict(self):
# GH 17336
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1})
expected = Index([4, 3, 2, 1])
tm.assert_index_equal(res.categories, expected)
# Test for inplace
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1}, inplace=True)
assert res is None
tm.assert_index_equal(cat.categories, expected)
# Test for dicts of smaller length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "c": 3})
expected = Index([1, "b", 3, "d"])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with bigger length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6})
expected = Index([1, 2, 3, 4])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with no items from old categories
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"f": 1, "g": 3})
expected = Index(["a", "b", "c", "d"])
tm.assert_index_equal(res.categories, expected)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["c", "b", "a"], ordered=True
)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
tm.assert_categorical_equal(cat, old)
# only res is changed
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
assert res is None
tm.assert_categorical_equal(cat, new)
@pytest.mark.parametrize(
"new_categories",
[
["a"], # not all "old" included in "new"
["a", "b", "d"], # still not all "old" in "new"
["a", "b", "c", "d"], # all "old" included in "new", but too long
],
)
def test_reorder_categories_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
msg = "items in new_categories are not the same as in old categories"
with pytest.raises(ValueError, match=msg):
cat.reorder_categories(new_categories)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["a", "b", "c", "d"], ordered=True
)
# first inplace == False
res = cat.add_categories("d")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
tm.assert_categorical_equal(cat, new)
assert res is None
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
tm.assert_categorical_equal(res, expected)
def test_add_categories_existing_raises(self):
# new is in old categories
cat = Categorical(["a", "b", "c", "d"], ordered=True)
msg = re.escape("new categories must not include old categories: {'d'}")
with pytest.raises(ValueError, match=msg):
cat.add_categories(["d"])
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
res = cat.set_categories(["c", "b", "a"], inplace=True)
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
assert res is None
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = Index(["a", "b", "c"])
tm.assert_index_equal(res.categories, exp_categories_back)
tm.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
tm.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0], dtype=np.int8))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
tm.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0], dtype=np.int8))
tm.assert_index_equal(res.categories, Index(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_index_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0], dtype=np.int8))
tm.assert_index_equal(c.categories, Index([1, 2, 3, 4]))
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
# all "pointers" to '4' must be changed from 3 to 0,...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
tm.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3], dtype=np.int8))
# categories are now in new order
tm.assert_index_equal(c.categories, Index([4, 3, 2, 1]))
# output is the same
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
assert c.min() == 4
assert c.max() == 1
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
def test_to_dense_deprecated(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
with tm.assert_produces_warning(FutureWarning):
cat.to_dense()
@pytest.mark.parametrize(
"values, categories, new_categories",
[
# No NaNs, same cats, same order
(["a", "b", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["a", "b", "a"], ["a", "b"], ["b", "a"]),
# Same, unsorted
(["b", "a", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["b", "a", "a"], ["a", "b"], ["b", "a"]),
# NaNs
(["a", "b", "c"], ["a", "b"], ["a", "b"]),
(["a", "b", "c"], ["a", "b"], ["b", "a"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
# Introduce NaNs
(["a", "b", "c"], ["a", "b"], ["a"]),
(["a", "b", "c"], ["a", "b"], ["b"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
# No overlap
(["a", "b", "c"], ["a", "b"], ["d", "e"]),
],
)
@pytest.mark.parametrize("ordered", [True, False])
def test_set_categories_many(self, values, categories, new_categories, ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c.set_categories(new_categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_set_categories_rename_less(self):
# GH 24675
cat = Categorical(["A", "B"])
result = cat.set_categories(["A"], rename=True)
expected = Categorical(["A", np.nan])
tm.assert_categorical_equal(result, expected)
def test_set_categories_private(self):
cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"])
cat._set_categories(["a", "c", "d", "e"])
expected = Categorical(["a", "c", "d"], categories=list("acde"))
tm.assert_categorical_equal(cat, expected)
# fastpath
cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"])
cat._set_categories(["a", "c", "d", "e"], fastpath=True)
expected = Categorical(["a", "c", "d"], categories=list("acde"))
tm.assert_categorical_equal(cat, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"], ordered=True)
# first inplace == False
res = cat.remove_categories("c")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
tm.assert_categorical_equal(cat, new)
assert res is None
@pytest.mark.parametrize("removals", [["c"], ["c", np.nan], "c", ["c", "c"]])
def test_remove_categories_raises(self, removals):
cat = Categorical(["a", "b", "a"])
message = re.escape("removals must all be in old categories: {'c'}")
with pytest.raises(ValueError, match=message):
cat.remove_categories(removals)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"], categories=["a", "b", "c", "d", "e"])
exp_categories_all = Index(["a", "b", "c", "d", "e"])
exp_categories_dropped = Index(["a", "b", "c", "d"])
tm.assert_index_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
tm.assert_index_equal(res.categories, exp_categories_dropped)
tm.assert_index_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
tm.assert_index_equal(c.categories, exp_categories_dropped)
assert res is None
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan], categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
tm.assert_index_equal(res.categories, Index(np.array(["a", "b", "c"])))
exp_codes = np.array([0, 1, 2, -1], dtype=np.int8)
tm.assert_numpy_array_equal(res.codes, exp_codes)
tm.assert_index_equal(c.categories, exp_categories_all)
val = ["F", np.nan, "D", "B", "D", "F", np.nan]
cat = Categorical(values=val, categories=list("ABCDEFG"))
out = cat.remove_unused_categories()
tm.assert_index_equal(out.categories, Index(["B", "D", "F"]))
exp_codes = np.array([2, -1, 1, 0, 1, 2, -1], dtype=np.int8)
tm.assert_numpy_array_equal(out.codes, exp_codes)
assert out.tolist() == val
alpha = list("abcdefghijklmnopqrstuvwxyz")
val = np.random.choice(alpha[::2], 10000).astype("object")
val[np.random.choice(len(val), 100)] = np.nan
cat = Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
assert out.tolist() == val.tolist()
class TestCategoricalAPIWithFactor(TestCategorical):
def test_describe(self):
# string type
desc = self.factor.describe()
assert self.factor.ordered
exp_index = CategoricalIndex(
["a", "b", "c"], name="categories", ordered=self.factor.ordered
)
expected = DataFrame(
{"counts": [3, 2, 3], "freqs": [3 / 8.0, 2 / 8.0, 3 / 8.0]}, index=exp_index
)
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
exp_index = CategoricalIndex(
list("abcd"), ordered=self.factor.ordered, name="categories"
)
expected = DataFrame(
{"counts": [3, 2, 3, 0], "freqs": [3 / 8.0, 2 / 8.0, 3 / 8.0, 0]},
index=exp_index,
)
tm.assert_frame_equal(desc, expected)
# check an integer one
cat = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1])
desc = cat.describe()
exp_index = CategoricalIndex([1, 2, 3], ordered=cat.ordered, name="categories")
expected = DataFrame(
{"counts": [5, 3, 3], "freqs": [5 / 11.0, 3 / 11.0, 3 / 11.0]},
index=exp_index,
)
tm.assert_frame_equal(desc, expected)
# https://github.com/pandas-dev/pandas/issues/3678
# describe should work with NaN
cat = Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame(
{"counts": [1, 2, 1], "freqs": [1 / 4.0, 2 / 4.0, 1 / 4.0]},
index=CategoricalIndex(
[1, 2, np.nan], categories=[1, 2], name="categories"
),
)
tm.assert_frame_equal(desc, expected)
def test_set_categories_inplace(self):
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
tm.assert_index_equal(cat.categories, Index(["a", "b", "c", "d"]))
class TestPrivateCategoricalAPI:
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype="int8")
tm.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
with pytest.raises(AttributeError, match="can't set attribute"):
c.codes = np.array([0, 1, 2, 0, 1], dtype="int8")
# changes in the codes array should raise
codes = c.codes
with pytest.raises(ValueError, match="assignment destination is read-only"):
codes[4] = 1
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype="int8")
| tm.assert_numpy_array_equal(c.codes, exp) | pandas._testing.assert_numpy_array_equal |
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
from rpy2.robjects.conversion import localconverter
from rpy2.robjects.vectors import StrVector
from rpy2.robjects.packages import importr, isinstalled
import pandas as pd
import numpy as np
from scipy.cluster.hierarchy import dendrogram
import matplotlib.pyplot as plt
import joblib
def install_r_package(name):
if not isinstalled(name):
utils = importr('utils')
utils.chooseCRANmirror(ind=1)
utils.install_packages(StrVector([name]))
def _hclust_to_scipy_linkage(result, plot=True):
"""Turn R hclust result obj into scipy linkage matrix format"""
# in hclust merge matrix, negative value is for singleton
raw_linkage = pd.DataFrame(np.array(result[0]))
nobs = raw_linkage.shape[0] + 1
raw_linkage[2] = np.array(result[1])
raw_linkage.index = raw_linkage.index + nobs
# in hclust merge matrix, positive value is for non-singleton
scipy_linkage = raw_linkage.copy()
scipy_linkage[raw_linkage.iloc[:, :2] < 0] += nobs
scipy_linkage[raw_linkage.iloc[:, :2] > 0] += (nobs - 1)
total_obs = nobs
# add the 4th col: number of singleton
cluster_dict = {}
labels = list(range(total_obs))
for cur_cluster_id, (left, right, distance) in scipy_linkage.iterrows():
left = int(left)
right = int(right)
cluster_dict[cur_cluster_id] = {'left': set(), 'right': set()}
if (left < total_obs) and (right < total_obs):
left = labels[left]
right = labels[right]
# merge of 2 original observations
cluster_dict[cur_cluster_id]['left'].add(left)
cluster_dict[cur_cluster_id]['right'].add(right)
else:
# left and/or right are cluster
if left < total_obs:
left = labels[left]
cluster_dict[cur_cluster_id]['left'].add(left)
else:
# node are cluster
cluster_dict[cur_cluster_id]['left'].update(
cluster_dict[left]['left'])
cluster_dict[cur_cluster_id]['left'].update(
cluster_dict[left]['right'])
if right < total_obs:
right = labels[right]
cluster_dict[cur_cluster_id]['right'].add(right)
else:
# node are cluster
cluster_dict[cur_cluster_id]['right'].update(
cluster_dict[right]['left'])
cluster_dict[cur_cluster_id]['right'].update(
cluster_dict[right]['right'])
cur_cluster_id += 1
cluster_records = {}
for cluster, _sub_dict in cluster_dict.items():
total_n = len(_sub_dict['left']) + len(_sub_dict['right'])
cluster_records[cluster] = total_n
scipy_linkage[3] = pd.Series(cluster_records)
# dendrogram
orders = list(result[2])
labels = list(result[3])
# correct order of the final dendrogram
r_order = [labels[i - 1] for i in orders]
dendro = dendrogram(scipy_linkage.values, no_plot=True)
python_order = pd.Series({a: b for a, b in zip(dendro['leaves'], r_order)}).sort_index().tolist()
# python_order = [i[1:] for i in python_order]
if plot:
fig, ax = plt.subplots(dpi=300)
dendro = dendrogram(scipy_linkage.values, labels=tuple(python_order), no_plot=False, ax=ax)
ax.xaxis.set_tick_params(rotation=90)
else:
dendro = dendrogram(scipy_linkage.values, labels=tuple(python_order), no_plot=True)
return scipy_linkage, python_order, dendro
class Dendrogram:
def __init__(self,
nboot=1000,
method_dist='correlation',
method_hclust='average',
n_jobs=-1):
self.nboot = nboot
self.method_dist = method_dist
self.method_hclust = method_hclust
self.n_jobs = n_jobs
self.linkage = None
self.label_order = None
self.dendrogram = None
self.edge_stats = None
def fit(self, data):
"""
Parameters
----------
data
The data is in obs-by-var form, row is obs.
Returns
-------
"""
importr("base")
pvclust = importr("pvclust")
with localconverter(ro.default_converter + pandas2ri.converter):
# The data is in obs-by-var form, row is obs. Transpose for R.
r_df = ro.conversion.py2rpy(data.T)
if self.n_jobs == -1:
self.n_jobs = True
result = pvclust.pvclust(r_df,
nboot=self.nboot,
method_dist=self.method_dist,
method_hclust=self.method_hclust,
parallel=self.n_jobs)
# dendrogram info
hclust = result[0]
linkage, label_order, dendro = _hclust_to_scipy_linkage(hclust, plot=False)
self.linkage = linkage
self.label_order = label_order
self.dendrogram = dendro
# scores of edges by pvclust bootstrap
edge_stats = | pd.DataFrame(result[1], index=result[1].colnames) | pandas.DataFrame |
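# Hypothetical usage of the Dendrogram wrapper above (the frame, shapes and parameter
# values are invented; running this needs R plus the pvclust package, which
# install_r_package above can fetch):
obs_by_var = pd.DataFrame(np.random.rand(20, 5),
                          index=['obs%d' % i for i in range(20)])
dend = Dendrogram(nboot=100, method_dist='correlation',
                  method_hclust='average', n_jobs=-1)
dend.fit(obs_by_var)
print(dend.label_order)     # leaf order recovered from the R hclust result
print(dend.linkage.head())  # scipy-style linkage rows: [left, right, distance, n_obs]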
import numpy as np
import pandas as pd
import gpflow
from gpflow.utilities import print_summary
def make_subset_simplified(cmip6, start, end, column_name, mean_center=False):
Xmake_all = []
Ymake_all = []
dataset_names = cmip6.name.unique()
for n in dataset_names:
p = cmip6[cmip6.name == n]
X = np.expand_dims(p.x,1)
if mean_center:
# mean center based on 1850-2020 data
temp_mean_centered = p[column_name] - np.mean(p[column_name][0:2061])
Y = np.expand_dims(temp_mean_centered,1)
else:
Y = np.expand_dims(p[column_name],1)
keep_tf = np.logical_and(X[:,0] >= start, X[:,0] < end)
Xmake_all.append(X[keep_tf,:])
Ymake_all.append(Y[keep_tf,:])
return Xmake_all, Ymake_all, dataset_names
def make_subset(cmip6, start, end, column_name):
Xmake_all = []
Ymake_all = []
dataset_names = cmip6.name.unique()
for n in dataset_names:
p = cmip6[cmip6.name == n]
X = np.expand_dims(p.time_hrs_since01,1)
# globally mean centered, but maybe not for w/e subset we have here. This bias might accidentally help gp?
temp_mean_centered = p[column_name] - np.mean(p[column_name])
Y = np.expand_dims(temp_mean_centered,1)
keep_tf = np.logical_and(X[:,0] >= start, X[:,0] < end)
Xmake_all.append(X[keep_tf,:])
Ymake_all.append(Y[keep_tf,:])
return Xmake_all, Ymake_all, dataset_names
def fit_ml_single(Y,X,kern_maker):
opt = gpflow.optimizers.Scipy()
max_iter = 1000
kern = kern_maker()
m = gpflow.models.GPR(data=(X, Y), kernel=kern, mean_function=None)
opt_logs = opt.minimize(m.training_loss, m.trainable_variables, options=dict(maxiter=max_iter))
return {'model':m,
'converged':opt_logs['success']}
def fit_ml(Y_all, X_all, dataset_names, kern_maker, param_colnames, param_extractor, filename):
eb_results = pd.DataFrame([], columns=['dataset','convergence','lik_var'] + param_colnames)
opt = gpflow.optimizers.Scipy()
max_iter = 1000
if len(Y_all) != len(dataset_names):
print('Size mismatch. Y:', len(Y_all), '. Names:', len(dataset_names))
return 0
for i in range(len(Y_all)):
kern = kern_maker()
X = X_all[i]
Y = Y_all[i]
m = gpflow.models.GPR(data=(X, Y), kernel=kern, mean_function=None)
opt_logs = opt.minimize(m.training_loss, m.trainable_variables, options=dict(maxiter=max_iter))
#print_summary(m)
results = {'dataset': dataset_names[i],
'convergence': opt_logs['success'],
'lik_var': np.array(m.likelihood.variance)}
param_values = param_extractor(m)
for j in range(len(param_values)):
results[param_colnames[j]] = param_values[j]
eb_results = eb_results.append(results, ignore_index=True)
eb_results.to_csv(filename, index=False)
return eb_results
def compare_loo_gp(param_results, kernel_maker, Xtr_all, Ytr_all, Xte_all, Yte_all, pred_dir=None):
M = param_results.shape[0]
mse_group = np.zeros((M))
mse_single = np.zeros((M))
single_set = []
# b/c we dropped observed
if M != len(Xtr_all):
print('Size mismatch: M', M, ', data', len(Xtr_all))
return 0
for m in range(M):
dataset = param_results.dataset[m]
X_tr = Xtr_all[m]
Y_tr = Ytr_all[m]
X_te = Xte_all[m]
Y_te = Yte_all[m]
group_ests = param_results.drop(m).mean(numeric_only=True)
kern_group = kernel_maker(group_ests)
kern_single = kernel_maker(param_results.loc[m])
m_group = gpflow.models.GPR(data=(X_tr, Y_tr), kernel=kern_group, mean_function=None)
m_group.likelihood.variance = np.double(group_ests.lik_var)
mod = gpflow.models.GPR(data=(X_tr, Y_tr), kernel=kern_single, mean_function=None)
mod.likelihood.variance = np.double(param_results.lik_var[m])
pred_m_group, pred_var_group = m_group.predict_f(X_te)
pred_m, pred_var = mod.predict_f(X_te)
mse_group[m] = np.mean((Y_te[:,0] - pred_m_group[:,0])**2)
mse_single[m] = np.mean((Y_te[:,0]- pred_m[:,0])**2)
single_set.append(dataset)
if pred_dir is not None:
fn = pred_dir + 'test_group_' + param_results.dataset[m] + '.csv'
d = np.array([pred_m_group[:,0], pred_var_group[:,0], Y_te[:,0]]).T
dat = | pd.DataFrame(d, columns=['mean', 'var', 'obs']) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
'''
bentoo-calltree-analyser.py - Bottleneck analysis based on calltree
This tool computes relative/absolute percentage for selected events based on
calltree structure.
'''
from __future__ import division
from builtins import map
from past.utils import old_div
import sqlite3
import argparse
import pandas
import fnmatch
import re
import json
import sys
def glob_strings(source, patterns):
if not source or not patterns:
return []
return [x for x in source for y in patterns if fnmatch.fnmatch(x, y)]
def quote(string):
return "\"%s\"" % string
def find_first_of(contents, candidates):
for c in candidates:
try:
i = contents.index(c)
except ValueError:
i = -1
if i >= 0:
return (c, i)
return (None, -1)
def split_columns(columns):
'''split 'columns' into (index_columns, data_columns)'''
timer_column_index = columns.index("TimerName")
return (columns[:timer_column_index+1], columns[timer_column_index+1:])
def extract_column_names(conn, table="result"):
orig_row_factory = conn.row_factory
conn.row_factory = sqlite3.Row
r = conn.execute("SELECT * FROM %s LIMIT 1" % table).fetchone()
names = list(r.keys())
conn.row_factory = orig_row_factory
return names
def extract_timer_names(calltree):
timers = set()
def visit_tree(node):
timers.add(node["id"])
for x in node["children"]:
visit_tree(x)
visit_tree(calltree)
return list(timers)
def build_parent_map(calltree):
parents = {}
def visit_tree(tree, top_level=False):
if top_level:
parents[tree["id"]] = None
for x in tree["children"]:
parents[x["id"]] = tree["id"]
visit_tree(x)
visit_tree(calltree, top_level=True)
return parents
def build_abs_seq_map(calltree):
result = {}
level = {}
seq_obj = {"seq": 0}
def visit_tree(tree, curr_level=0):
level[tree["id"]] = curr_level
result[tree["id"]] = seq_obj["seq"]
seq_obj["seq"] += 1
for i, x in enumerate(tree["children"]):
visit_tree(x, curr_level+1)
visit_tree(calltree)
return (result, level)
def compute_percentage(ref_db, calltree_file, out_db,
columns=None, append=None, treelize_timer_name=False):
conn0 = sqlite3.connect(ref_db)
ref_columns = extract_column_names(conn0)
index_columns, data_columns = split_columns(ref_columns)
if columns:
for x in columns:
assert(x in data_columns)
data_columns = list(columns)
append_columns = []
if append:
append_columns.extend(append)
timer_column = find_first_of(ref_columns, ["TimerName", "Name"])[0]
if not timer_column:
raise ValueError("Can not find timer column")
index_columns.remove(timer_column)
data_columns.insert(0, timer_column)
    calltree = json.load(open(calltree_file))
timer_names = extract_timer_names(calltree)
sql = list(map(quote, index_columns + data_columns + append_columns))
sql = "SELECT %s FROM result WHERE " % ", ".join(sql)
sql += " OR ".join("%s = \"%s\"" % (timer_column, x) for x in timer_names)
sql += " ORDER BY %s" % ", ".join(map(quote, index_columns))
data = pandas.read_sql_query(sql, conn0)
parents = build_parent_map(calltree)
abs_seq, level = build_abs_seq_map(calltree)
top_timer = calltree["id"]
def compute_group_percent(group):
result = pandas.DataFrame(group)
for c in data_columns:
if c == timer_column:
continue
values = {}
for k, v in parents.items():
if k == top_timer:
values[k] = group[group[timer_column] == k][c].max()
else:
values[k] = group[group[timer_column] == v][c].max()
top_value = values[top_timer]
abs_c = "%s_abs_percent" % c
rel_c = "%s_rel_percent" % c
result[abs_c] = old_div(result[c], top_value)
result[rel_c] = [values[x] for x in result[timer_column]]
result[rel_c] = old_div(result[c], result[rel_c])
def treelize(x):
return "|" + "--" * level[x] + " " + x
result["abs_seq"] = [abs_seq[x] for x in result[timer_column]]
if treelize_timer_name:
result[timer_column] = list(map(treelize, result[timer_column]))
else:
result["level"] = [level[x] for x in result[timer_column]]
result["parent"] = [parents[x] for x in result[timer_column]]
return result
final = []
for k, v in data.groupby(index_columns):
transformed = compute_group_percent(v)
final.append(transformed)
final = | pandas.concat(final, ignore_index=True) | pandas.concat |
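# For reference, the calltree JSON consumed by compute_percentage above is assumed to be a
# nested dict of the form {"id": ..., "children": [...]}; the node ids below are purely
# illustrative:
example_calltree = {
    "id": "main",
    "children": [
        {"id": "setup", "children": []},
        {"id": "solve", "children": [
            {"id": "assemble", "children": []},
            {"id": "linear_solve", "children": []},
        ]},
    ],
}
# build_parent_map(example_calltree) ->
#   {'main': None, 'setup': 'main', 'solve': 'main',
#    'assemble': 'solve', 'linear_solve': 'solve'}
# build_abs_seq_map(example_calltree) numbers nodes in depth-first order and records each
# node's depth, which drives the "|--" indentation of TimerName in compute_percentage.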
from models import TokenDailyStats
from models import Token
import pydash
from pycoingecko import CoinGeckoAPI
from config import project_config
import os
from joblib import Parallel, delayed
from tqdm import tqdm
import numpy as np
import math
from datetime import datetime
from utils.date_util import DateUtil
from utils.upload_csv_to_gsc import upload_csv_to_gsc
from utils.import_gsc_to_bigquery import import_gsc_to_bigquery
import pandas
import re
valid_date = DateUtil.utc_start_of_date(datetime.strptime('2021-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'))
cg = CoinGeckoAPI()
columns = [
'symbol',
'address',
'coin_gecko_id',
'day',
'trading_vol_24h',
'high_price_24h',
'low_price_24h',
'circulating_supply',
'facebook_likes',
'fdv_to_tvl_ratio',
'fully_diluted_valuation',
'market_cap',
'market_cap_rank',
'max_supply',
'mcap_to_tvl_ratio',
'price',
'reddit_accounts_active_48h',
'reddit_average_comments_48h',
'reddit_average_posts_48h',
'reddit_subscribers',
'telegram_channel_user_count',
'total_supply',
'total_value_locked',
'twitter_followers',
'created_at',
'updated_at'
]
def transform_data_time():
regex_day = re.compile(r'2021')
query = {
'$or': [
{
'day': re.compile(r'2020')
},
{
'day': re.compile(r'2021')
}
]
}
total = TokenDailyStats.count(query)
limit = 100
round_times = math.ceil(total / limit)
print(range(0, round_times))
items = np.array_split(range(0, round_times), 64)
Parallel(n_jobs=64)(delayed(handle_transform)(item) for item in tqdm(items))
def handle_transform(round_times):
regex_day = re.compile(r'2021')
query = {
'$or': [
{
'day': re.compile(r'2020')
},
{
'day': re.compile(r'2021')
}
]
}
limit = 100
for round_time in round_times:
round_time = int(round_time)
token_daily_stats_arr = list(TokenDailyStats.find(query).skip(round_time * limit).limit(limit))
for token_daily_stats in token_daily_stats_arr:
print(token_daily_stats)
id = pydash.get(token_daily_stats, '_id')
day = pydash.get(token_daily_stats, 'day')
if not type(day) is datetime:
day = datetime.strptime(day, '%Y-%m-%d %H:%M:%S') if len(day) == 19 else datetime.fromisoformat(day)
created_at = pydash.get(token_daily_stats, 'created_at')
if not type(created_at) is datetime:
created_at = datetime.fromisoformat(created_at)
updated_at = pydash.get(token_daily_stats, 'updated_at')
if not type(updated_at) is datetime:
updated_at = datetime.fromisoformat(updated_at)
trading_vol_24h = pydash.get(token_daily_stats, 'trading_vol_24h')
high_price_24h = pydash.get(token_daily_stats, 'high_price_24h')
low_price_24h = pydash.get(token_daily_stats, 'low_price_24h')
circulating_supply = pydash.get(token_daily_stats, 'circulating_supply')
facebook_likes = pydash.get(token_daily_stats, 'facebook_likes')
fdv_to_tvl_ratio = pydash.get(token_daily_stats, 'fdv_to_tvl_ratio')
fully_diluted_valuation = pydash.get(token_daily_stats, 'fully_diluted_valuation')
market_cap = pydash.get(token_daily_stats, 'market_cap')
market_cap_rank = pydash.get(token_daily_stats, 'market_cap_rank')
max_supply = pydash.get(token_daily_stats, 'max_supply')
mcap_to_tvl_ratio = pydash.get(token_daily_stats, 'mcap_to_tvl_ratio')
price = pydash.get(token_daily_stats, 'price')
reddit_accounts_active_48h = pydash.get(token_daily_stats, 'reddit_accounts_active_48h')
reddit_average_comments_48h = pydash.get(token_daily_stats, 'reddit_average_comments_48h')
reddit_average_posts_48h = pydash.get(token_daily_stats, 'reddit_average_posts_48h')
reddit_subscribers = pydash.get(token_daily_stats, 'reddit_subscribers')
telegram_channel_user_count = pydash.get(token_daily_stats, 'telegram_channel_user_count')
total_supply = pydash.get(token_daily_stats, 'total_supply')
total_value_locked = pydash.get(token_daily_stats, 'total_value_locked')
twitter_followers = pydash.get(token_daily_stats, 'twitter_followers')
update = {
"day": day,
"trading_vol_24h": None if len(str(trading_vol_24h)) == 0 else trading_vol_24h,
"high_price_24h": None if len(str(high_price_24h)) == 0 else high_price_24h,
"low_price_24h": None if len(str(low_price_24h)) == 0 else low_price_24h,
"circulating_supply": None if len(str(circulating_supply)) == 0 else circulating_supply,
"facebook_likes": None if len(str(facebook_likes)) == 0 else facebook_likes,
"fdv_to_tvl_ratio": None if len(str(fdv_to_tvl_ratio)) == 0 else fdv_to_tvl_ratio,
"fully_diluted_valuation": None if len(str(fully_diluted_valuation)) == 0 else fully_diluted_valuation,
"market_cap": None if len(str(market_cap)) == 0 else market_cap,
"market_cap_rank": None if len(str(market_cap_rank)) == 0 else market_cap_rank,
"max_supply": None if len(str(max_supply)) == 0 else max_supply,
"mcap_to_tvl_ratio": None if len(str(mcap_to_tvl_ratio)) == 0 else mcap_to_tvl_ratio,
"price": None if len(str(price)) == 0 else price,
"reddit_accounts_active_48h": None if len(
str(reddit_accounts_active_48h)) == 0 else reddit_accounts_active_48h,
"reddit_average_comments_48h": None if len(
str(reddit_average_comments_48h)) == 0 else reddit_average_comments_48h,
"reddit_average_posts_48h": None if len(
str(reddit_average_posts_48h)) == 0 else reddit_average_posts_48h,
"reddit_subscribers": None if len(str(reddit_subscribers)) == 0 else reddit_subscribers,
"telegram_channel_user_count": None if len(
str(telegram_channel_user_count)) == 0 else telegram_channel_user_count,
"total_supply": None if len(str(total_supply)) == 0 else total_supply,
"total_value_locked": None if len(str(total_value_locked)) == 0 else total_value_locked,
"twitter_followers": None if len(str(twitter_followers)) == 0 else twitter_followers,
"created_at": created_at,
"updated_at": updated_at
}
TokenDailyStats.update_one(query={"_id": id}, set_dict=update, upsert=True)
def fix_token_daily_stats():
fix_null_valid_token_daily_stats()
fix_duplicate_token_daily_stats()
fix_missing_token_daily_stats_coin_gecko_ids()
def fix_null_valid_token_daily_stats():
query = {
"trading_vol_24h": None,
"market_cap": None,
"price": None
}
update = {
"row_status": "disable"
}
result = TokenDailyStats.update_many(query=query, set_dict=update)
def fix_duplicate_token_daily_stats():
result = TokenDailyStats.aggregate([
{
"$match": {
"row_status": {"$ne": "disable"}
}
},
{
"$group": {
"_id": {
"coin_gecko_id": "$coin_gecko_id",
"day": "$day"
},
"count": {"$sum": 1}
}
},
{
"$match": {
"count": {"$gt": 1}
}
}
])
daily_check_stats = list(result)
print(len(daily_check_stats))
for daily_check_stats in daily_check_stats:
coin_gecko_id = pydash.get(daily_check_stats, '_id.coin_gecko_id')
if not coin_gecko_id:
continue
day = pydash.get(daily_check_stats, '_id.day')
daily_query = {
"coin_gecko_id": coin_gecko_id,
"day": day,
"row_status": {"$ne": "disable"}
}
first_result = TokenDailyStats.find_one(query=daily_query)
first_result_id = pydash.get(first_result, '_id')
fix_query = {
"_id": {"$ne": first_result_id},
"coin_gecko_id": coin_gecko_id,
"day": day
}
fix_update = {
"row_status": "disable"
}
print(fix_query, fix_update)
TokenDailyStats.update_many(query=fix_query, set_dict=fix_update)
def fix_missing_token_daily_stats(coin_gecko_id, fix_key):
coin_gecko_id = str(coin_gecko_id)
start_check_time = get_start_check_time(coin_gecko_id, fix_key)
token_daily_stats_repair_data(coin_gecko_id, fix_key, start_check_time)
def token_daily_stats_repair_data(coin_gecko_id, fix_key, start_check_time):
start_query = {
"row_status": {"$ne": "disable"},
"coin_gecko_id": coin_gecko_id,
"day": {"$gte": start_check_time},
fix_key: {"$ne": None}
}
print(start_query)
end_query = {
"row_status": {"$ne": "disable"},
"coin_gecko_id": coin_gecko_id,
"day": {"$gte": start_check_time}
}
earliest_token_daily_statss = list(
TokenDailyStats.find(start_query, {'day': 1, fix_key: 1}).sort('day', 1).limit(1))
earliest_token_daily_stats_day = pydash.get(earliest_token_daily_statss, '0.day')
if not earliest_token_daily_stats_day:
return
exists_latest_daily_statss = list(TokenDailyStats.find(end_query, {'day': 1, fix_key: 1}).sort('day', -1).limit(1))
exists_latest_token_daily_stats_day = pydash.get(exists_latest_daily_statss, '0.day')
latest_token_daily_stats_day = DateUtil.utc_start_of_date()
diff_days = DateUtil.days_diff(earliest_token_daily_stats_day, latest_token_daily_stats_day)
for i in range(0, diff_days):
print("day =====>>", i)
execution_date = DateUtil.utc_x_hours_after(24 * i, earliest_token_daily_stats_day)
handle_token_daily_stats_repair_data(coin_gecko_id, execution_date, fix_key)
def handle_token_daily_stats_repair_data(coin_gecko_id, execution_date, fix_key):
token_info = Token.find_one(query={"coin_gecko_id": coin_gecko_id})
symbol = pydash.get(token_info, 'symbol')
address = pydash.get(token_info, 'token')
query = {
"row_status": {"$ne": "disable"},
"coin_gecko_id": coin_gecko_id,
"day": execution_date,
fix_key: {"$ne": None}
}
execution_date_token_stats = TokenDailyStats.find_one(query=query)
if not execution_date_token_stats:
first_date = DateUtil.utc_x_hours_ago(24 * 1, execution_date)
second_date = DateUtil.utc_x_hours_ago(24 * 2, execution_date)
arr_dates = [first_date, second_date]
date_query = {
"row_status": {"$ne": "disable"},
'day': {'$in': arr_dates},
'coin_gecko_id': coin_gecko_id,
fix_key: {"$ne": None}
}
dates_results = TokenDailyStats.find_list(query=date_query)
value_arr_result = []
for dates_result in dates_results:
fix_value = pydash.get(dates_result, fix_key)
value_arr_result.append(fix_value)
if len(value_arr_result) == 0:
            print('************************** no data found for', coin_gecko_id)
return
avg_fix_value = pydash.sum_(value_arr_result) / len(value_arr_result)
if pydash.includes(['circulating_supply', 'total_supply', 'max_supply'], fix_key):
fix_value = avg_fix_value
else:
fix_value = avg_fix_value + avg_fix_value * 0.01
forge_key = 'forge_{key}'.format(key=fix_key)
new_data_query = {
"row_status": {"$ne": "disable"},
"coin_gecko_id": coin_gecko_id,
"day": execution_date
}
update = {
forge_key: True,
'coin_gecko_id': coin_gecko_id,
'day': execution_date,
'symbol': symbol,
'address': address,
fix_key: fix_value,
'updated_at': DateUtil.utc_current(),
'created_at': DateUtil.utc_current()
}
print(update)
TokenDailyStats.update_one(query=new_data_query, set_dict=update, upsert=True)
def get_coin_gecko_ids():
coin_gecko_ids = TokenDailyStats.distinct('coin_gecko_id')
return coin_gecko_ids
def get_start_check_time(coin_gecko_id, fix_key):
token_info_query = {
"coin_gecko_id": coin_gecko_id
}
token_info = Token.find_one(query=token_info_query)
check_time = pydash.get(token_info, 'check_time')
if not check_time:
query = {
"row_status": {"$ne": "disable"},
"coin_gecko_id": coin_gecko_id,
fix_key: {"$ne": None},
"day": {"$gte": valid_date}
}
print(query)
token_daily_stats = list(TokenDailyStats.find(query, {'day': 1}).sort('day', 1).limit(1))
check_time = pydash.get(token_daily_stats, '0.day')
return check_time
def fix_missing_token_daily_stats_coin_gecko_ids():
coin_gecko_ids = get_coin_gecko_ids()
group_coin_gecko_id_items = np.array_split(coin_gecko_ids, 64)
Parallel(n_jobs=64)(delayed(fix_missing_token_daily_stats_all_keys_by_group_coin_gecko_id_item)(item) for item in tqdm(group_coin_gecko_id_items))
def fix_missing_token_daily_stats_all_keys_by_group_coin_gecko_id_item(items):
for item in items:
keys = [
"price",
"total_supply",
"max_supply",
"circulating_supply",
"high_price_24h",
"low_price_24h",
"fully_diluted_valuation",
"market_cap",
"trading_vol_24h",
"fdv_to_tvl_ratio"
]
for key in keys:
fix_missing_token_daily_stats(item, key)
Token.update_one(query={'coin_gecko_id': item}, set_dict={'restore_status': 'done'})
def token_info_add_mark_check_time(coin_gecko_id, check_time):
query = {
"coin_gecko_id": coin_gecko_id
}
    # Write the marker to the Token collection, which is where get_start_check_time() reads check_time back from.
    Token.update_one(query=query, set_dict={"check_time": check_time})
def token_daily_stats_upload_and_import():
query = {
'day': {'$gte': DateUtil.utc_start_of_date(datetime.strptime('2021-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'))},
'row_status': {'$ne': 'disable'}
}
dates = TokenDailyStats.distinct('day', query)
for date in dates:
handle_write_data_to_csv(date)
handle_upload_csv_to_gsc(date)
print(date)
handle_import_gsc_csv_to_bigquery()
def handle_write_data_to_csv(execution_date):
values = []
token_daily_datas = TokenDailyStats.find({'day': execution_date, 'row_status': {'$ne': 'disable'}})
for token_daily in token_daily_datas:
symbol = pydash.get(token_daily, 'symbol')
address = pydash.get(token_daily, 'address')
coin_gecko_id = pydash.get(token_daily, 'coin_gecko_id')
day = pydash.get(token_daily, 'day')
trading_vol_24h = pydash.get(token_daily, 'trading_vol_24h')
high_price_24h = pydash.get(token_daily, 'high_price_24h')
low_price_24h = pydash.get(token_daily, 'low_price_24h')
circulating_supply = pydash.get(token_daily, 'circulating_supply')
facebook_likes = pydash.get(token_daily, 'facebook_likes')
fdv_to_tvl_ratio = pydash.get(token_daily, 'fdv_to_tvl_ratio')
fully_diluted_valuation = pydash.get(token_daily, 'fully_diluted_valuation')
market_cap = pydash.get(token_daily, 'market_cap')
market_cap_rank = pydash.get(token_daily, 'market_cap_rank')
max_supply = pydash.get(token_daily, 'max_supply')
mcap_to_tvl_ratio = pydash.get(token_daily, 'mcap_to_tvl_ratio')
price = pydash.get(token_daily, 'price')
reddit_accounts_active_48h = pydash.get(token_daily, 'reddit_accounts_active_48h')
reddit_average_comments_48h = pydash.get(token_daily, 'reddit_average_comments_48h')
reddit_average_posts_48h = pydash.get(token_daily, 'reddit_average_posts_48h')
reddit_subscribers = pydash.get(token_daily, 'reddit_subscribers')
telegram_channel_user_count = pydash.get(token_daily, 'telegram_channel_user_count')
total_supply = pydash.get(token_daily, 'total_supply')
total_value_locked = pydash.get(token_daily, 'total_value_locked')
twitter_followers = pydash.get(token_daily, 'twitter_followers')
created_at = pydash.get(token_daily, 'created_at')
updated_at = pydash.get(token_daily, 'updated_at')
values.append({
'symbol': symbol,
'address': address,
'coin_gecko_id': coin_gecko_id,
'day': day,
'trading_vol_24h': trading_vol_24h,
'high_price_24h': high_price_24h,
'low_price_24h': low_price_24h,
'circulating_supply': circulating_supply,
'facebook_likes': facebook_likes,
'fdv_to_tvl_ratio': fdv_to_tvl_ratio,
'fully_diluted_valuation': fully_diluted_valuation,
'market_cap': market_cap,
'market_cap_rank': market_cap_rank,
'max_supply': max_supply,
'mcap_to_tvl_ratio': mcap_to_tvl_ratio,
'price': price,
'reddit_accounts_active_48h': reddit_accounts_active_48h,
'reddit_average_comments_48h': reddit_average_comments_48h,
'reddit_average_posts_48h': reddit_average_posts_48h,
'reddit_subscribers': reddit_subscribers,
'telegram_channel_user_count': telegram_channel_user_count,
'total_supply': total_supply,
'total_value_locked': total_value_locked,
'twitter_followers': twitter_followers,
'updated_at': updated_at,
'created_at': created_at
})
df = | pandas.DataFrame(values, columns=columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
we test .agg behavior / note that .apply is tested
generally in test_groupby.py
"""
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
from functools import partial
import numpy as np
from numpy import nan
import pandas as pd
from pandas import (date_range, MultiIndex, DataFrame,
Series, Index, bdate_range, concat)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.groupby import SpecificationError, DataError
from pandas.compat import OrderedDict
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
class TestGroupByAggregate(object):
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_agg_api(self):
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame({'data1': np.random.randn(5),
'data2': np.random.randn(5),
'key1': ['a', 'a', 'b', 'b', 'a'],
'key2': ['one', 'two', 'one', 'two', 'one']})
grouped = df.groupby('key1')
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ['data1', 'data2']
result = grouped.agg(peak_to_peak)
assert_frame_equal(result, expected)
def test_agg_regression1(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_agg_datetimes_mixed(self):
data = [[1, '2012-01-01', 1.0], [2, '2012-01-02', 2.0], [3, None, 3.0]]
df1 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
data = [[row[0], datetime.strptime(row[1], '%Y-%m-%d').date() if row[1]
else None, row[2]] for row in data]
df2 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
df1['weights'] = df1['value'] / df1['value'].sum()
gb1 = df1.groupby('date').aggregate(np.sum)
df2['weights'] = df1['value'] / df1['value'].sum()
gb2 = df2.groupby('date').aggregate(np.sum)
assert (len(gb1) == len(gb2))
def test_agg_period_index(self):
from pandas import period_range, PeriodIndex
prng = period_range('2012-1-1', freq='M', periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
assert isinstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start='1999-01', periods=5, freq='M')
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [('s1', s1), ('s2', s2)]
df = DataFrame.from_items(series)
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_dict_parameter_cast_result_dtypes(self):
# GH 12821
df = DataFrame(
{'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
'time': date_range('1/1/2011', periods=8, freq='H')})
df.loc[[0, 1, 2, 5], 'time'] = None
# test for `first` function
exp = df.loc[[0, 3, 4, 6]].set_index('class')
grouped = df.groupby('class')
assert_frame_equal(grouped.first(), exp)
assert_frame_equal(grouped.agg('first'), exp)
assert_frame_equal(grouped.agg({'time': 'first'}), exp)
assert_series_equal(grouped.time.first(), exp['time'])
assert_series_equal(grouped.time.agg('first'), exp['time'])
# test for `last` function
exp = df.loc[[0, 3, 4, 7]].set_index('class')
grouped = df.groupby('class')
assert_frame_equal(grouped.last(), exp)
assert_frame_equal(grouped.agg('last'), exp)
assert_frame_equal(grouped.agg({'time': 'last'}), exp)
assert_series_equal(grouped.time.last(), exp['time'])
assert_series_equal(grouped.time.agg('last'), exp['time'])
# count
exp = pd.Series([2, 2, 2, 2],
index=Index(list('ABCD'), name='class'),
name='time')
assert_series_equal(grouped.time.agg(len), exp)
assert_series_equal(grouped.time.size(), exp)
exp = pd.Series([0, 1, 1, 2],
index=Index(list('ABCD'), name='class'),
name='time')
assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes(self):
# similar to GH12821
# xref #11444
u = [datetime(2015, x + 1, 1) for x in range(12)]
v = list('aaabbbbbbccd')
df = pd.DataFrame({'X': v, 'Y': u})
result = df.groupby('X')['Y'].agg(len)
expected = df.groupby('X')['Y'].count()
assert_series_equal(result, expected)
def test_agg_must_agg(self):
grouped = self.df.groupby('A')['C']
pytest.raises(Exception, grouped.agg, lambda x: x.describe())
pytest.raises(Exception, grouped.agg, lambda x: x.index[:2])
def test_agg_ser_multi_key(self):
# TODO(wesm): unused
ser = self.df.C # noqa
f = lambda x: x.sum()
results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f)
expected = self.df.groupby(['A', 'B']).sum()['C']
assert_series_equal(results, expected)
def test_agg_apply_corner(self):
# nothing to group, all NA
grouped = self.ts.groupby(self.ts * np.nan)
assert self.ts.dtype == np.float64
# groupby float64 values results in Float64Index
exp = Series([], dtype=np.float64, index=pd.Index(
[], dtype=np.float64))
assert_series_equal(grouped.sum(), exp)
assert_series_equal(grouped.agg(np.sum), exp)
assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
# DataFrame
grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan)
exp_df = DataFrame(columns=self.tsframe.columns, dtype=float,
index=pd.Index([], dtype=np.float64))
assert_frame_equal(grouped.sum(), exp_df, check_names=False)
assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0],
check_names=False)
def test_agg_grouping_is_list_tuple(self):
from pandas.core.groupby import Grouping
df = tm.makeTimeDataFrame()
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouper
grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_float64_no_int64(self):
# see gh-11199
df = DataFrame({"a": [1, 2, 3, 4, 5],
"b": [1, 2, 2, 4, 5],
"c": [1, 2, 3, 4, 5]})
expected = DataFrame({"a": [1, 2.5, 4, 5]},
index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a"]].mean()
tm.assert_frame_equal(result, expected)
expected = DataFrame({"a": [1, 2.5, 4, 5],
"c": [1, 2.5, 4, 5]},
index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a", "c"]].mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_api_consistency(self):
# GH 9052
# make sure that the aggregates via dict
# are consistent
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
grouped = df.groupby(['A', 'B'])
c_mean = grouped['C'].mean()
c_sum = grouped['C'].sum()
d_mean = grouped['D'].mean()
d_sum = grouped['D'].sum()
result = grouped['D'].agg(['sum', 'mean'])
expected = pd.concat([d_sum, d_mean],
axis=1)
expected.columns = ['sum', 'mean']
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg([np.sum, np.mean])
expected = pd.concat([c_sum,
c_mean,
d_sum,
d_mean],
axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['sum', 'mean']])
assert_frame_equal(result, expected, check_like=True)
result = grouped[['D', 'C']].agg([np.sum, np.mean])
expected = pd.concat([d_sum,
d_mean,
c_sum,
c_mean],
axis=1)
expected.columns = MultiIndex.from_product([['D', 'C'],
['sum', 'mean']])
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': 'mean', 'D': 'sum'})
expected = pd.concat([d_sum,
c_mean],
axis=1)
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': ['mean', 'sum'],
'D': ['mean', 'sum']})
expected = pd.concat([c_mean,
c_sum,
d_mean,
d_sum],
axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['mean', 'sum']])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = grouped[['D', 'C']].agg({'r': np.sum,
'r2': np.mean})
expected = pd.concat([d_sum,
c_sum,
d_mean,
c_mean],
axis=1)
expected.columns = MultiIndex.from_product([['r', 'r2'],
['D', 'C']])
| assert_frame_equal(result, expected, check_like=True) | pandas.util.testing.assert_frame_equal |
import os
import pandas as pd
path = os.getcwd()
files = os.listdir('./files')
df_total = pd.DataFrame()
for file in files: # loop through Excel files
if file.endswith('.xlsx'):
excel_file = | pd.ExcelFile('./files/'+file) | pandas.ExcelFile |
# -*- coding: utf-8 -*-
"""Proiect.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1TR1Frf0EX4PtFZkLlVdGtMTINqhoQwRw
"""
# Importing the libraries
import numpy as np
import pandas as pd # pandas for reading the data files
from sklearn import preprocessing
from sklearn import svm # importing the model
from sklearn.feature_extraction.text import TfidfVectorizer # transform the text data into numeric feature values
from sklearn.metrics import classification_report, confusion_matrix
# Loading the data
train_labels = pd.read_csv('train_labels.txt', sep='\t', header=None, engine='python')
train_labels = train_labels.to_numpy() # convert the data frame into an array
train_labels = train_labels[:,1] # keep only the labels
train_samples = pd.read_csv('train_samples.txt', sep='\t', header=None, engine='python')
train_samples = train_samples.to_numpy()
train_samples = train_samples[:,1] # keep only the words
validation_samples = pd.read_csv('validation_samples.txt', sep='\t', header=None, engine='python')
validation_samples = validation_samples.to_numpy()
validation_samples = validation_samples[:,1] # keep the words
validation_labels = | pd.read_csv('validation_labels.txt', sep='\t', header=None, engine='python') | pandas.read_csv |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Process Data
# ## Load Libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
# ## Load Data
def load_data(messages_filepath, categories_filepath):
"""
    Input:
    1. messages_filepath: path to the messages dataset
    2. categories_filepath: path to the categories dataset
    Output:
    1. df: merged dataframe containing the data from the messages and categories files
    Process:
    1. Load the required datasets: messages, categories
    2. Merge the two datasets
"""
# Load messages dataset
messages = pd.read_csv(messages_filepath)
# Load categories dataset
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
import pandas as pd
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import numpy as np
# Reading the cleaned data
df = pd.read_csv('model_dataset.csv')
# Dummy variables creation
bed_room_dummies = pd.get_dummies(df['number_of_bedrooms'], prefix='bed_rm', drop_first=True)
bath_room_dummies = pd.get_dummies(df['numberofbathrooms_per_house'], prefix='bath_rm', drop_first=True)
floor_dummies = pd.get_dummies(df['floors'], prefix='flr', drop_first=True)
house_condition_dummies = | pd.get_dummies(df['house_condition'], prefix='cond', drop_first=True) | pandas.get_dummies |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 10:17:13 2018
@author: David
"""
# Built-in libraries
#import argparse
#import collections
#import multiprocessing
import os
#import pickle
#import time
# External libraries
#import rasterio
#import gdal
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
#from scipy.stats import linregress
from scipy.stats import median_absolute_deviation
import xarray as xr
# Local libraries
import debrisglobal.globaldebris_input as debris_prms
from meltcurves import melt_fromdebris_func
from meltcurves import debris_frommelt_func
#%% ===== SCRIPT OPTIONS =====
option_hd_melt_uncertainty = False
hd_uncertainty_schematic_fig = False
option_melt_diagram_template = False
hd_methods_diagram_ngoz = False
#hd_obs_fp = debris_prms.main_directory + '/../hd_obs/'
#%% ===== FUNCTIONS =====
def hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value = 1.645):
""" Calculate hd-melt relationship for uncertainty """
# Dataset of melt data
ds_ostrem = xr.open_dataset(melt_fp + melt_fn)
ds_ostrem = ds_ostrem.sortby('hd_cm')
time_year = pd.to_datetime(ds_ostrem.time.values).year
time_daysperyear = np.array([366 if x%4 == 0 else 365 for x in time_year])
time_yearfrac = time_year + (pd.to_datetime(ds_ostrem.time.values).dayofyear-1) / time_daysperyear
hd_wbnds_array_list = []
for n in np.arange(0,len(measured_hd_list)):
yearfracs = yearfracs_list[n]
start_yearfrac = yearfracs[0]
end_yearfrac = yearfracs[1]
# # Hack to just have one curve per glacier from 2000 - 2015
# for n in [0]:
# start_yearfrac = 2000.6
# end_yearfrac = 2018.6
start_idx = np.where(abs(time_yearfrac - start_yearfrac) == abs(time_yearfrac - start_yearfrac).min())[0][0]
end_idx = np.where(abs(time_yearfrac - end_yearfrac) == abs(time_yearfrac - end_yearfrac).min())[0][0]
# Ostrem Curve
debris_thicknesses = ds_ostrem.hd_cm.values
debris_melt_df = pd.DataFrame(np.zeros((len(debris_thicknesses),3)),
columns=['debris_thickness', 'melt_mmwed', 'melt_std_mmwed'])
nelev = 0
for ndebris, debris_thickness in enumerate(debris_thicknesses):
# Units: mm w.e. per day
melt_mmwed = (ds_ostrem['melt'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
melt_std_mmwed = (ds_ostrem['melt_std'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
debris_melt_df.loc[ndebris] = debris_thickness / 100, melt_mmwed, melt_std_mmwed
debris_melt_df['melt_bndlow_mmwed'] = debris_melt_df['melt_mmwed'] - z_value * debris_melt_df['melt_std_mmwed']
debris_melt_df['melt_bndhigh_mmwed'] = debris_melt_df['melt_mmwed'] + z_value * debris_melt_df['melt_std_mmwed']
# MEAN CURVE
fit_idx = list(np.where(debris_thicknesses >= 5)[0])
func_coeff, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_mmwed.values[fit_idx])
# melt_cleanice = debris_melt_df.loc[0,'melt_mmwed']
# LOWER BOUND CURVE
func_coeff_bndlow, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndlow_mmwed.values[fit_idx])
# melt_cleanice_bndlow = debris_melt_df.loc[0,'melt_bndlow_mmwed']
# UPPER BOUND CURVE
func_coeff_bndhigh, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndhigh_mmwed.values[fit_idx])
# melt_cleanice_bndhigh = debris_melt_df.loc[0,'melt_bndhigh_mmwed']
debris_4curve = np.arange(0.02,3.01,0.01)
# column 0 = hd
# column 1 = melt
# column 2 = melt bndlow
# column 3 = melt bndhigh
# column 4 = hd bndlow debris properties
# column 5 = hd bndhigh debris properties
# column 6 = hd bndlow elevchg
# column 7 = hd bndhigh elevchg
# column 8 = hd bndlow combined
# column 9 = hd bndhigh combined
hd_wbnds_array = np.zeros((len(debris_4curve), 10))
for ndebris, hd in enumerate(debris_4curve):
# Invert melt against bounded curves to get the uncertainty
melt_mean = melt_fromdebris_func(hd, func_coeff[0], func_coeff[1])
hd_low = debris_frommelt_func(melt_mean, func_coeff_bndlow[0], func_coeff_bndlow[1])
if hd_low < 0:
hd_low = 0
hd_high = debris_frommelt_func(melt_mean, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
# Increase/decrease melt based on elevation change uncertainty and get change in debris thickness
melt_bndlow = melt_mean + elevchg_mwea_zadj
melt_bndhigh = melt_mean - elevchg_mwea_zadj
hd_bndlow_elevchg = debris_frommelt_func(melt_bndlow, func_coeff[0], func_coeff[1])
if hd_bndlow_elevchg < 0:
hd_bndlow_elevchg = 0
hd_bndhigh_elevchg = debris_frommelt_func(melt_bndhigh, func_coeff[0], func_coeff[1])
# Combined root sum of squares of deviations
hd_bndlow_both = hd - ((hd - hd_low)**2 + (hd - hd_bndlow_elevchg)**2)**0.5
hd_bndhigh_both = hd + ((hd - hd_high)**2 + (hd - hd_bndhigh_elevchg)**2)**0.5
# Max combined
# hd_bndlow_max = debris_frommelt_func(melt_bndlow, func_coeff_bndlow[0], func_coeff_bndlow[1])
# if hd_bndlow_max < 0:
# hd_bndlow_max = 0
# hd_bndhigh_max = debris_frommelt_func(melt_bndhigh, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
# Record data
hd_wbnds_array[ndebris,:] = [hd, melt_mean, melt_bndlow, melt_bndhigh, hd_low, hd_high,
hd_bndlow_elevchg, hd_bndhigh_elevchg, hd_bndlow_both, hd_bndhigh_both]
# print(np.round(hd,2), ' melt:', np.round(melt_mean,2),
# 'bnds:', str(np.round(hd_low,2)) + '-' + str(np.round(hd_high,2)),
# ' bndelev:', str(np.round(hd_bndlow_elevchg,2)) + '-' + str(np.round(hd_bndhigh_elevchg,2)),
# ' bndboth:', str(np.round(hd_bndlow_both,2)) + '-' + str(np.round(hd_bndhigh_both,2)),
## ' bndmax:', str(np.round(hd_bndlow_max,2)) + '-' + str(np.round(hd_bndhigh_max,2))
# )
hd_wbnds_array_list.append(hd_wbnds_array)
return hd_wbnds_array_list
#%%
if option_hd_melt_uncertainty:
# glaciers = ['1.15645', '2.14297', '6.00474', '7.01044', '10.01732', '11.00719', '11.02472', '11.02810', '11.02858',
# '11.03005', '12.01012', '12.01132', '13.05000', '13.43232', '14.06794', '14.16042', '15.03733',
# '15.03743', '15.04045', '15.07886', '15.11758', '18.02397']
glaciers = ['1.15645', '2.14297', '6.00474', '7.01044', '11.00719', '11.02472', '11.02810', '11.02858', '11.03005',
'12.01012', '12.01132', '13.05000', '13.43232', '14.06794', '14.16042', '15.03733', '15.03743',
'15.04045', '15.07886', '15.11758', '18.02397']
# glaciers = ['10.01732']
# z_value = 1.645 # 5-95%
z_value = 1 # 16-84%
# z_value = 0.675 # 25-75%
elevchg_mwea_std = 0.72
elevchg_mwea_zadj = z_value * elevchg_mwea_std
hd_wbnds_array_all = None
# ===== KENNICOTT (1.15645) ====
if '1.15645' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/1.15645_kennicott_anderson_2019-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '6150N-21700E-debris_melt_curve.nc'
yearfracs_list = [[2011 + 169/365, 2011 + 228/365]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Emmons (2.14297) ====
if '2.14297' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/2.14297_moore2019-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4700N-23825E-debris_melt_curve.nc'
yearfracs_list = [[2014 + 212/365, 2014 + 222/365]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Svinafellsjokull (06.00474) ====
# if '6.00474' in glaciers:
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/6.00474_moller2016-melt.csv')
# measured_hd_list = [mb_df.hd_m.values]
# measured_melt_list = [mb_df['melt_mf'].values]
# melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
# melt_fn = '6400N-34325E-debris_melt_curve.nc'
# yearfracs_list = [[2013 + 137/365, 2013 + 150/365]]
#
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
#
# hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
#
# for hd_wbnds_array in hd_wbnds_array_list:
# if hd_wbnds_array_all is None:
# hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
# else:
# hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Larsbreen (7.01044) ====
if '7.01044' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/7.01044_larsbreen_NB2006-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '7825N-1600E-debris_melt_curve.nc'
yearfracs_list = [[2002 + 191/366, 2002 + 202/366]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== <NAME> (10.01732) ====
# if '10.01732' in glaciers:
## print('\nmelt comparison with Mayer et al (2011)')
# assert True == False, '10.01732 NEEDS TO DO THE MODELING FIRST!'
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/10.01732_mayer2011-melt.csv')
# measured_hd_list = [mb_df.hd_m.values]
# measured_melt_list = [mb_df['melt_mf'].values]
# melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
# melt_fn = '5000N-8775E-debris_melt_curve.nc'
# yearfracs_list = [[2007 + 192/365, 2007 + 211/365]]
#
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
#
# hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
#
# for hd_wbnds_array in hd_wbnds_array_list:
# if hd_wbnds_array_all is None:
# hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
# else:
# hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Vernagtferner (11.00719) ====
if '11.00719' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.00719_vernagtferner_juen2013-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4700N-1075E-debris_melt_curve.nc'
yearfracs_list = [[2010 + 176/365, 2010 + 191/365]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Vernocolo (11.02472) =====
if '11.02472' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02472_bocchiola2015-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4625N-1050E-debris_melt_curve.nc'
yearfracs_list = [[2007 + 222/365, 2007 + 256/365]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Arolla (11.02810) ====
if '11.02810' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02810_arolla_reid2012-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4600N-750E-debris_melt_curve.nc'
yearfracs_list = [[2010 + 209/365, 2010 + 252/365]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Belvedere (11.02858) ====
if '11.02858' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02858_belvedere_nb2006-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4600N-800E-debris_melt_curve.nc'
yearfracs_list = [[2003 + 218/365, 2003 + 222/365]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== MIAGE (11.03005) ====
if '11.03005' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.03005_reid2010-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4650N-1050E-debris_melt_curve.nc'
yearfracs_list = [[2005 + 172/365, 2005 + 247/365]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Zopkhito (12.01012) ====
# if '12.01012' in glaciers:
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01012_lambrecht2011-melt2008.csv')
# mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01012_lambrecht2011-melt2009.csv')
# measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
# measured_melt_list = [mb_df.melt_mmwed.values, mb_df2.melt_mmwed.values]
## measured_melt_list = [mb_df['melt_mf'].values, mb_df2['melt_mf'].values]
# melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
# melt_fn = '4300N-4350E-debris_melt_curve.nc'
# yearfracs_list = [[2008 + 172/366, 2008 + 179/366], [2009 + 182/365, 2009 + 189/365]]
#
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
#
# hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
#
# for hd_wbnds_array in hd_wbnds_array_list:
# if hd_wbnds_array_all is None:
# hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
# else:
# hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Djankuat (12.01132) ====
# if '12.01132' in glaciers:
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/12.01132_lambrecht2011-melt.csv')
# measured_hd_list = [mb_df.hd_m.values]
# measured_melt_list = [mb_df['melt_mf'].values]
# melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
# melt_fn = '4325N-4275E-debris_melt_curve.nc'
# yearfracs_list = [[2008 + 172/366, 2008 + 246/366]]
#
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
#
# hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
#
# for hd_wbnds_array in hd_wbnds_array_list:
# if hd_wbnds_array_all is None:
# hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
# else:
# hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== S Inylchek (13.05000) ====
if '13.05000' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.05000_hagg2008-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4200N-8025E-debris_melt_curve.nc'
yearfracs_list = [[2005 + 211/365, 2005 + 222/365]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# # ===== No 72 =====
# if '13.43165' in glaciers:
# print('\nmelt comparison with Wang et al (2011)')
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43165_wang2011-melt.csv')
# measured_hd_list = [mb_df.hd_m.values]
# measured_melt_list = [mb_df['melt_mmwed'].values]
# glac_name = "No 72 (13.43165)"
# fig_fn = '13.43165_hd_melt_wang2011.png'
# ds_names = ['8/10/10$\u2009$-$\u2009$8/29/10']
# melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
# melt_fn = '4175N-8000E-debris_melt_curve.nc'
# yearfracs_list = [[2010 + 222/365, 2010 + 241/365]]
#
# hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
# hd_tick_major, hd_tick_minor = 0.1, 0.02
## melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
## melt_tick_major, melt_tick_minor = 10, 5
# melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 0.1) * 0.1,1) + 0.1
# melt_tick_major, melt_tick_minor = 0.5, 0.1
#
# print('NEED THE DATES!')
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
#
# plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
# melt_fp, melt_fn,
# ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
# hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
# melt_min=melt_min, melt_max=melt_max,
# melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor)
# ===== Koxkar (13.43232) ====
# if '13.43232' in glaciers:
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/13.43232_juen2014-melt.csv')
# measured_hd_list = [mb_df.hd_m.values]
# measured_melt_list = [mb_df['melt_mf'].values]
# melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
# melt_fn = '4175N-8000E-debris_melt_curve.nc'
# yearfracs_list = [[2010 + 222/365, 2010 + 241/365]]
#
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
#
#    hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
#
# for hd_wbnds_array in hd_wbnds_array_list:
# if hd_wbnds_array_all is None:
# hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
# else:
# hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Baltoro (14.06794) ====
if '14.06794' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/14.06794_mihalcea2006-melt.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/14.06794_groos2017-melt.csv')
measured_hd_list = [mb_df.hd_m.values, mb_df2.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values, mb_df2.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '3575N-7650E-debris_melt_curve.nc'
yearfracs_list = [[2004 + 186/366, 2004 + 196/366],
[2011 + 203/365, 2011 + 222/365]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Batal (14.16042) ====
if '14.16042' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/14.16042_patel2016-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '3225N-7750E-debris_melt_curve.nc'
yearfracs_list = [[2014 + 213/365, 2014 + 288/365]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Khumbu (15.03733) ====
if '15.03733' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.03733_kayastha2000-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2800N-8700E-debris_melt_curve.nc'
yearfracs_list = [[2000 + 143/366, 2000 + 153/366]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== <NAME> (15.03743) ====
if '15.03743' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.03743_rounce2015-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2800N-8700E-debris_melt_curve.nc'
yearfracs_list = [[2014 + 138/365, 2014 + 315/365]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Lirung (15.04045) ====
if '15.04045' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df1 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.04045_chand2015_fall-melt.csv')
mb_df2 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.04045_chand2015_winter-melt.csv')
mb_df3 = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.04045_chand2015_spring-melt.csv')
measured_hd_list = [mb_df1.hd_m.values, mb_df2.hd_m.values, mb_df3.hd_m.values]
measured_melt_list = [mb_df1.melt_mmwed.values, mb_df2.melt_mmwed.values, mb_df3.melt_mmwed.values]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2825N-8550E-debris_melt_curve.nc'
yearfracs_list = [[2013 + 265/365, 2013 + 276/365], [2013 + 333/365, 2013 + 346/365],
[2014 + 97/365, 2014 + 109/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.02
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 10, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== Hailuogou (15.07886) ====
if '15.07886' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
measured_hd_list = [np.array([2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5, 6, 7, 7, 10, 10, 11, 13]) / 100]
measured_melt_list = [np.array([65.2, 55.4, 52.8, 51.6, 47.0, 53.4, 44.4, 50.3, 58, 48.9, 58.4, 54.4, 44.8,
52.6, 43.7, 52.5, 38.5, 36.5, 34.2, 28.4])]
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '2950N-10200E-debris_melt_curve.nc'
yearfracs_list = [[2008 + 184/366, 2008 + 274/366]]
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_wbnds_array_list = hd_melt_uncertainty(measured_hd_list, yearfracs_list, melt_fp, melt_fn, z_value=z_value)
for hd_wbnds_array in hd_wbnds_array_list:
if hd_wbnds_array_all is None:
hd_wbnds_array_all = hd_wbnds_array[:,:,np.newaxis]
else:
hd_wbnds_array_all = np.concatenate((hd_wbnds_array_all, hd_wbnds_array[:,:,np.newaxis]), axis=2)
# ===== 24K (15.11758) ====
if '15.11758' in glaciers:
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.11758_yang2017-melt.csv')
mb_df2 = | pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/15.11758_wei2010-melt.csv') | pandas.read_csv |
import os
import numpy as np
import pandas as pd
import xgboost as xgb
import lightgbm as lgb
import catboost as ctb
from tqdm import tqdm
from sklearn import preprocessing
from sklearn.model_selection import KFold, StratifiedKFold,train_test_split
from sklearn.metrics import mean_squared_error
def eval_Features(df, feature_nan={'nan': -1}):
    # Parse the string representation of each feature list into real Python lists.
    # feature_nan is used as the globals dict for eval(), so a bare `nan` token
    # inside the string is substituted with -1.
    feature_tmp = []
    for index, feature in tqdm(enumerate(df['Features'].values)):
        feature_tmp.append(eval(feature, feature_nan))
    feature_tmp = pd.DataFrame(feature_tmp)
    feature_tmp.columns = ['feature_' + str(i) for i in range(feature_tmp.shape[1])]
    return feature_tmp
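# Example usage (hypothetical toy frame, for illustration only):
#   demo = pd.DataFrame({'Features': ['[0.1, nan, 2.5]']})
#   feats = eval_Features(demo)   # -> columns feature_0, feature_1, feature_2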
def get_dataset(path = './Molecule_prediction_20200312'):
    # Read in the data
df_train = pd.read_csv(f'{path}/train_0312.csv')
df_test = | pd.read_csv(f'{path}/test_noLabel_0312.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import numpy as np
import datetime
import math
import os
import osr
import glob
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
import gdal
from joblib import Parallel, delayed
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor = 0.001,
epsg=4326, cores=1):
'''
    This function runs the Python implementation of the HANTS algorithm. It
    takes a folder of geotiff raster data as input, creates a netCDF
    file, and optionally exports the data back to geotiffs.
'''
nc_paths = create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path, Scaling_factor,
epsg)
args = [nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor]
print('\tApply HANTS on tiles...')
results = Parallel(n_jobs=cores)(delayed(HANTS_netcdf)(nc_path, args)
for nc_path in nc_paths)
if len(nc_paths) > 1:
Merge_NC_Tiles(nc_paths, nc_path, start_date, end_date, latlim, lonlim, cellsize, epsg, Scaling_factor)
return nc_path
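# Illustrative call (hypothetical paths and HANTS settings, shown only to document
# the expected argument types; none of these values come from the source):
# nc_file = run_HANTS(r'/data/ndvi_tifs', 'NDVI_{yyyy}{mm}{dd}.tif',
#                     '2016-01-01', '2016-12-31', [30.0, 31.0], [30.0, 31.0],
#                     0.0025, r'/data/ndvi_hants.nc',
#                     nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
#                     fet=0.05, dod=1, delta=0.25)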
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path, Scaling_factor,
epsg=4326):
'''
    This function creates a netCDF file from a folder of geotiff rasters to
    be used to run HANTS.
'''
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1],
cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1],
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
# ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = pd.date_range(start_date, end_date, freq='D')
dates_ls = [d.toordinal() for d in dates_dt]
os.chdir(rasters_path)
ras_ls = glob.glob('*.tif')
# Create tile parts
if (lat_n > 200 or lon_n > 200):
lat_n_amount = np.maximum(1,int(np.floor(lat_n/100)))
lon_n_amount = np.maximum(1,int(np.floor(lon_n/100)))
nc_path_part_names = nc_path.split('.')
nc_path_tiles = []
for lat_n_one in range(0, lat_n_amount):
for lon_n_one in range(0, lon_n_amount):
nc_path_tile = ''.join(nc_path_part_names[0] + "_h%03d_v%03d.nc" %(lon_n_one, lat_n_one))
nc_path_tiles = np.append(nc_path_tiles, nc_path_tile)
    else:
        # Single tile: keep the list/tile-count handling consistent with the
        # tiled branch so the loops below and run_HANTS iterate correctly.
        lat_n_amount = 1
        lon_n_amount = 1
        nc_path_tiles = [nc_path]
i = 0
# Loop over the nc_paths
for nc_path_tile in nc_path_tiles:
i += 1
if lat_n_amount > 1:
lat_part = int(nc_path_tile[-6:-3])
lat_start = lat_part * 100
if int(lat_part) is not int(lat_n_amount-1):
lat_end = int((lat_part + 1) * 100)
else:
lat_end = int(lat_n)
else:
lat_start = int(0)
lat_end = int(lat_n)
if lon_n_amount > 1:
lon_part = int(nc_path_tile[-11:-8])
lon_start = int(lon_part * 100)
if int(lon_part) is not int(lon_n_amount-1):
lon_end = int((lon_part + 1) * 100)
else:
lon_end = int(lon_n)
else:
lon_start = int(0)
lon_end = int(lon_n)
# Define space dimention
lat_range = lat_ls[lat_start:lat_end]
lon_range = lon_ls[lon_start:lon_end]
geo_ex = tuple([lon_range[0] - 0.5*cellsize, cellsize, 0, lat_range[0] + cellsize * 0.5, 0, -cellsize])
# Create netcdf file
print('Creating netCDF file tile %s out of %s...' %(i,len(nc_path_tiles)))
nc_file = netCDF4.Dataset(nc_path_tile, 'w', format="NETCDF4_CLASSIC")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_end - lat_start)
lon_dim = nc_file.createDimension('longitude', lon_end - lon_start)
time_dim = nc_file.createDimension('time', len(dates_ls))
# Create Variables
crso = nc_file.createVariable('crs', 'i4')
crso.long_name = 'Lon/Lat Coords in WGS84'
crso.standard_name = 'crs'
crso.grid_mapping_name = 'latitude_longitude'
crso.projection = spa_ref
crso.longitude_of_prime_meridian = 0.0
crso.semi_major_axis = 6378137.0
crso.inverse_flattening = 298.257223563
crso.geo_reference = geo_ex
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude',))
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude',))
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time',))
time_var.standard_name = 'time'
time_var.calendar = 'gregorian'
original_var = nc_file.createVariable('original_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
original_var.long_name = 'original_values'
original_var.grid_mapping = 'crs'
original_var.add_offset = 0.00
original_var.scale_factor = Scaling_factor
original_var.set_auto_maskandscale(False)
print('\tVariables created')
# Fill in time and space dimention
lat_var[:] = lat_range
lon_var[:] = lon_range
time_var[:] = dates_ls
# Create memory example file
# empty array
empty_vec = pd.np.empty((lat_end - lat_start, lon_end - lon_start))
empty_vec[:] = -9999 * np.float(Scaling_factor)
dest_ex = Save_as_MEM(empty_vec, geo_ex, str(epsg))
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(len(dates_ls)):
Date_now = datetime.datetime.fromordinal(dates_ls[tt])
yyyy = str(Date_now.year)
mm = '%02d' %int(Date_now.month)
dd = '%02d' %int(Date_now.day)
# Raster
ras = name_format.format(yyyy=yyyy,mm=mm,dd=dd)
if ras in ras_ls:
data_in = os.path.join(rasters_path, ras)
dest = reproject_dataset_example(data_in, dest_ex)
array_tt = dest.GetRasterBand(1).ReadAsArray()
array_tt[array_tt<-9999] = -9999 * np.float(Scaling_factor)
original_var[tt, :, :] = np.int_(array_tt * 1./np.float(Scaling_factor))
else:
# Store values
original_var[tt, :, :] = np.int_(empty_vec * 1./np.float(Scaling_factor))
# Close file
nc_file.close()
print('NetCDF %s file created' %i)
# Return
return nc_path_tiles
def HANTS_netcdf(nc_path, args):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netcdf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor = args
# Read netcdfs
nc_file = netCDF4.Dataset(nc_path, 'r+', format="NETCDF4_CLASSIC")
nc_file.set_fill_on()
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[ztime, rows, cols] = original_values.shape
size_st = cols*rows
values_hants = pd.np.empty((ztime, rows, cols))
outliers_hants = pd.np.empty((ztime, rows, cols))
values_hants[:] = pd.np.nan
outliers_hants[:] = pd.np.nan
# Additional parameters
ni = len(time_var)
ts = range(ni)
# Loop
counter = 1
#print('Running HANTS...')
for m in range(rows):
for n in range(cols):
#print('\t{0}/{1}'.format(counter, size_st))
y = pd.np.array(original_values[:, m, n])
y[pd.np.isnan(y)] = -9999
[yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
low, high, fet, dod, delta)
values_hants[:, m, n] = yr
outliers_hants[:, m, n] = outliers
counter = counter + 1
values_hants[values_hants<-9999] = -9999 * np.float(Scaling_factor)
hants_var = nc_file.createVariable('hants_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
hants_var.long_name = 'hants_values'
hants_var.grid_mapping = 'crs'
hants_var.add_offset = 0.00
hants_var.scale_factor = Scaling_factor
hants_var.set_auto_maskandscale(False)
combined_var = nc_file.createVariable('combined_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
combined_var.long_name = 'combined_values'
combined_var.grid_mapping = 'crs'
combined_var.add_offset = 0.00
combined_var.scale_factor = Scaling_factor
combined_var.set_auto_maskandscale(False)
outliers_var = nc_file.createVariable('outliers', 'i4',
('time', 'latitude', 'longitude'),
fill_value=-9999)
outliers_var.long_name = 'outliers'
outliers_var.grid_mapping = 'crs'
hants_var[:,:,:]= np.int_(values_hants * 1./np.float(Scaling_factor))
outliers_var[:,:,:] = outliers_hants
combined_var[:,:,:] = pd.np.where(outliers_hants,
np.int_(values_hants * 1./np.float(Scaling_factor)),
np.int_(original_values * 1./np.float(Scaling_factor)))
# Close netcdf file
nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
delta):
'''
This function runs the python implementation of the HANTS algorithm for a
single point (lat, lon). It plots the fit and returns a data frame with
the 'original' and the 'hants' time series.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r', format="NETCDF4_CLASSIC")
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netCDF file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
lat_i = pd.np.where(lat == lat_closest)[0][0]
lon_i = pd.np.where(lon == lon_closest)[0][0]
# Read values
original_values = nc_file.variables['original_values'][:, lat_i, lon_i]
# Additional parameters
ni = len(time)
ts = range(ni)
# HANTS
y = pd.np.array(original_values)
y[pd.np.isnan(y)] = -9999
[hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
dod, delta)
# Plot
top = 1.15*max(pd.np.nanmax(original_values),
pd.np.nanmax(hants_values))
bottom = 1.15*min(pd.np.nanmin(original_values),
pd.np.nanmin(hants_values))
ylim = [bottom, top]
plt.plot(time, hants_values, 'r-', label='HANTS')
plt.plot(time, original_values, 'b.', label='Original data')
plt.ylim(ylim[0], ylim[1])
plt.legend(loc=4)
plt.xlabel('time')
plt.ylabel('values')
plt.gcf().autofmt_xdate()
plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.format(lon_closest,
lat_closest))
plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
plt.show()
# Close netcdf file
nc_file.close()
# Data frame
df = pd.DataFrame({'time': time,
'original': original_values,
'hants': hants_values})
# Return
return df
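# Example (illustrative only; the file name, point and parameter values are assumptions):
#     df = HANTS_singlepoint(r'/path/to/ndvi_series.nc', point=(35.5, 33.9),
#                            nb=36, nf=3, HiLo='Lo', low=-0.3, high=1.0,
#                            fet=0.05, dod=1, delta=0.1)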
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta):
'''
This function applies the Harmonic ANalysis of Time Series (HANTS)
algorithm originally developed by the Netherlands Aerospace Centre (NLR)
(http://www.nlr.org/space/earth-observation/).
This python implementation was based on two previous implementations
available at the following links:
https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-series-applied-to-arrays
http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-series--hants-
'''
# Arrays
mat = pd.np.zeros((min(2*nf+1, ni), ni))
# amp = np.zeros((nf + 1, 1))
# phi = np.zeros((nf+1, 1))
yr = pd.np.zeros((ni, 1))
outliers = pd.np.zeros((1, len(y)))
# Filter
sHiLo = 0
if HiLo == 'Hi':
sHiLo = -1
elif HiLo == 'Lo':
sHiLo = 1
nr = min(2*nf+1, ni)
noutmax = ni - nr - dod
# dg = 180.0/math.pi
mat[0, :] = 1.0
ang = 2*math.pi*pd.np.arange(nb)/nb
cs = pd.np.cos(ang)
sn = pd.np.sin(ang)
i = pd.np.arange(1, nf+1)
for j in pd.np.arange(ni):
index = pd.np.mod(i*ts[j], nb)
mat[2 * i-1, j] = cs.take(index)
mat[2 * i, j] = sn.take(index)
p = pd.np.ones_like(y)
bool_out = (y < low) | (y > high)
p[bool_out] = 0
outliers[bool_out.reshape(1, y.shape[0])] = 1
nout = pd.np.sum(p == 0)
if nout > noutmax:
if pd.np.isclose(y, -9999).any():
ready = pd.np.array([True])
yr = y
outliers = pd.np.zeros((y.shape[0]), dtype=int)
outliers[:] = -9999
else:
raise Exception('Not enough data points.')
else:
ready = pd.np.zeros((y.shape[0]), dtype=bool)
nloop = 0
nloopmax = ni
while ((not ready.all()) & (nloop < nloopmax)):
nloop += 1
za = pd.np.matmul(mat, p*y)
A = pd.np.matmul(pd.np.matmul(mat, pd.np.diag(p)),
pd.np.transpose(mat))
A = A + pd.np.identity(nr)*delta
A[0, 0] = A[0, 0] - delta
zr = pd.np.linalg.solve(A, za)
yr = pd.np.matmul(pd.np.transpose(mat), zr)
diffVec = sHiLo*(yr-y)
err = p*diffVec
err_ls = list(err)
err_sort = deepcopy(err)
err_sort.sort()
rankVec = [err_ls.index(f) for f in err_sort]
maxerr = diffVec[rankVec[-1]]
ready = (maxerr <= fet) | (nout == noutmax)
if (not ready):
i = ni - 1
j = rankVec[i]
while ((p[j]*diffVec[j] > 0.5*maxerr) & (nout < noutmax)):
p[j] = 0
outliers[0, j] = 1
nout += 1
i -= 1
if i == 0:
j = 0
else:
                        j = rankVec[i]  # advance to the next-worst point (the source hard-coded j = 1 here)
return [yr, outliers]
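# Example (sketch, assuming nothing beyond the function above): smoothing a short
# synthetic series with one artificial low outlier; all values are illustrative only.
#
#     import numpy as np
#     ni = 26                                   # number of observations
#     ts = range(ni)
#     y = 0.5 + 0.3 * np.sin(2 * np.pi * np.arange(ni) / 26.0)
#     y[10] = -0.5                              # artificial low outlier
#     yr, outliers = HANTS(ni=ni, nb=26, nf=2, y=y, ts=ts, HiLo='Lo',
#                          low=-0.3, high=1.0, fet=0.05, dod=1, delta=0.1)
#     # yr is the reconstructed series, outliers flags the rejected samples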
def plot_point(nc_path, point, ylim=None):
'''
This function plots the original time series and the HANTS time series.
It can be used to assess the fit.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r', format="NETCDF4_CLASSIC")
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netCDF file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
lat_i = pd.np.where(lat == lat_closest)[0][0]
lon_i = pd.np.where(lon == lon_closest)[0][0]
# Read values
values_o = nc_file.variables['original_values'][lat_i, lon_i, :]
values_h = nc_file.variables['hants_values'][lat_i, lon_i, :]
if not ylim:
top = 1.15*max(pd.np.nanmax(values_o),
pd.np.nanmax(values_h))
bottom = 1.15*min(pd.np.nanmin(values_o),
pd.np.nanmin(values_h))
ylim = [bottom, top]
# Plot
plt.plot(time, values_h, 'r-', label='HANTS')
plt.plot(time, values_o, 'b.', label='Original data')
plt.ylim(ylim[0], ylim[1])
plt.legend(loc=4)
plt.xlabel('time')
plt.ylabel('values')
plt.gcf().autofmt_xdate()
plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.format(lon_closest,
lat_closest))
plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
plt.show()
# Close netcdf file
nc_file.close()
# Return
return True
def Merge_NC_Tiles(nc_paths, nc_path, start_date, end_date, latlim, lonlim, cellsize, epsg, Scaling_factor):
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1],
cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1],
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
geo_ex = tuple([lon_ls[0] - 0.5*cellsize, cellsize, 0, lat_ls[0] - cellsize * 0.5, 0, -cellsize])
    dates_dt = pd.date_range(start_date, end_date, freq='D')
import numpy as np
from numpy import nan
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from numpy.testing import assert_allclose
from pvlib.location import Location
from pvlib import tracking
SINGLEAXIS_COL_ORDER = ['tracker_theta', 'aoi',
'surface_azimuth', 'surface_tilt']
def test_solar_noon():
index = pd.date_range(start='20180701T1200', freq='1s', periods=1)
apparent_zenith = pd.Series([10], index=index)
apparent_azimuth = pd.Series([180], index=index)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': 0, 'aoi': 10,
'surface_azimuth': 90, 'surface_tilt': 0},
index=index, dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
    assert_frame_equal(expect, tracker_data)
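# Additional hedged sanity check (not part of the original test module): it only
# asserts structural properties of the returned frame, not specific angles.
def test_singleaxis_output_structure():
    index = pd.date_range(start='20180701T0900', freq='1s', periods=1)
    apparent_zenith = pd.Series([60], index=index)
    apparent_azimuth = pd.Series([90], index=index)
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=0,
                                       max_angle=90, backtrack=True,
                                       gcr=2.0/7.0)
    # A DataFrame carrying the four tracker columns, aligned to the input index
    assert isinstance(tracker_data, pd.DataFrame)
    assert set(SINGLEAXIS_COL_ORDER) <= set(tracker_data.columns)
    assert tracker_data.index.equals(index)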
# -*- coding: utf-8 -*-
"""
@author: <NAME> - https://www.linkedin.com/in/adamrvfisher/
"""
#Developed in Python 3.5
#This is a trading strategy model with a brute force optimizer - under construction
#also see DonchianTrendEfficiencyFilterSingleStockSingleFrequency.py
#R Multiple Finder; Trade Data Tracking; Graphs
#Import modules
import numpy as np
#import random as rand
import pandas as pd
#import time as t
#from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
#import matplotlib.pyplot as plt
import warnings
import math
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick_ohlc
import matplotlib.dates as mdates
import random as rand
import time
#Start timer
starttime = time.time()
#Assign ticker
Ticker1 = 'UVXY'
#Request data
Asset1 = YahooGrabber(Ticker1)
#Don't display warnings
warnings.filterwarnings("ignore", category = RuntimeWarning)
pd.options.mode.chained_assignment = None
#Declaration/Assignments
#Empty list
Empty = []
ParamsAndResultsList = []
#Empty dataframe
Trades = pd.DataFrame()
ParamsAndResults = pd.DataFrame()
#Number of iterations for optimizer
iterations = range(0,15)
#Constraints in percentages; both unimplemented
Commission = .005
Slippage = .004
#Iteration tracking
Counter = 0
#SubIndex column is a secondary index, it exists to help identify exits
Asset1['SubIndex'] = range(0,len(Asset1))
#Calculate log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
#ATR calculation using ATRwindow
Asset1['Method1'] = Asset1['High'] - Asset1['Low']
Asset1['Method2'] = abs((Asset1['High'] - Asset1['Close'].shift(1)))
Asset1['Method3'] = abs((Asset1['Low'] - Asset1['Close'].shift(1)))
Asset1['Method1'] = Asset1['Method1'].fillna(0)
Asset1['Method2'] = Asset1['Method2'].fillna(0)
Asset1['Method3'] = Asset1['Method3'].fillna(0)
Asset1['TrueRange'] = Asset1[['Method1','Method2','Method3']].max(axis = 1)
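#True Range per Wilder: TR_t = max(High_t - Low_t,
#                                  |High_t - Close_{t-1}|,
#                                  |Low_t  - Close_{t-1}|)
#ATR (computed later per iteration) is the rolling mean of TR over ATRwindow periods.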
#Time series trimmer for in/out sample data
Asset1 = Asset1[-1250:] #In
#For number of iterations
for n in iterations:
#Variable windows
    #The Donchian window is used to find the min/max of the price range that generates the long/short signal
    #A smaller Donchian window makes double-signal days more likely
donchianwindow = rand.randint(2,125)
#ATRwindow is used for volatility position sizing
ATRwindow = rand.randint(2,50)
if ATRwindow >= donchianwindow:
continue
#Stopwindow is used for trailing high/low used for long/short exits
stopwindow = rand.randint(2,100)
#Number of ATR distance between stop
numATR = rand.random() * 4
#Define starting equity
Equity = 100000000000
#Risk for first trade
RiskPerTrade = .005
DollarRisk = Equity * RiskPerTrade
#ATR in points; not %
Asset1['ATR'] = Asset1['TrueRange'].rolling(window = ATRwindow,
center=False).mean()
#Market top and bottom calculation
Asset1['RollingMax'] = Asset1['High'].rolling(window=donchianwindow, center=False).max()
Asset1['RollingMin'] = Asset1['Low'].rolling(window=donchianwindow, center=False).min()
#Rolling stops
Asset1['RollingLongStop'] = Asset1['Low'].rolling(window = stopwindow, center = False).min()
Asset1['RollingShortStop'] = Asset1['High'].rolling(window = stopwindow, center = False).max()
#Signal = Price </> min/max
#If price is greater than the max go long
Asset1['LongSignal'] = np.where(Asset1['High'] > Asset1['RollingMax'].shift(1), 1, 0)
#If price is less than the min go short
Asset1['ShortSignal'] = np.where(Asset1['Low'] < Asset1['RollingMin'].shift(1), 1, 0)
#If double signal days exist, then entry and P/L on those days will not be reflected correctly, spurious return stream
Asset1['DoubleDay'] = np.where(Asset1['LongSignal'] + Asset1['ShortSignal'] == 2, 1, 0)
if sum(Asset1['DoubleDay']) > 0:
continue
#Next two lines combines long signal and short signal columns into a single column
#If there is a double day then a short entry is recorded
Asset1['Signal'] = np.where(Asset1['LongSignal'] == 1, 1, 0)
Asset1['Signal'] = np.where(Asset1['ShortSignal'] == 1, -1, Asset1['Signal'])
#If Rolling Min/Max is still being computed, stay out of market
Asset1['Signal'] = np.where(Asset1['RollingMax'] == np.nan, 0, Asset1['Signal'])
#Index values for segmenting data for trade analysis
SignalDates = list(Asset1['Signal'].loc[(Asset1['Signal'] != 0)].index)
#Trade ATR on signal day
Asset1['TradeATR'] = np.where(Asset1['Signal'] != 0, Asset1['ATR'].shift(1), np.nan)
#Declare columns to record entry price and stop; assignment inside of while loop on per trade basis
Asset1['EntryPriceUnitOne'] = np.nan
Asset1['StopPriceUnitOne'] = np.nan
#On the first signal we record entry, create stop regime, record exit, record
#trade details, and then trim the time series to the next signal after exit. This process repeats.
#TradeSubset is a copy of Asset1 from the date of the first signal to the end of the time series
TradeSubset = Asset1.loc[(Asset1.index >= SignalDates[0])]
#Every trade is in the while loop. If a position exists that is still open
#at the end of the testing period, it is taken care of outside the while loop
#while Counter < 1: #Use this instead of the while loop to go a certain number of trades into the iteration
#While there is still a signal in the time series
while sum(abs(TradeSubset['Signal'])) != 0:
#Reset gap indicators
GapEntry = 0
GapExit = 0
#Reset Entry and Exit Price
EntryPrice = np.nan
ExitPrice = np.nan
#Signal dates
IndexOfEntry = TradeSubset.index[0]
#This is the ATR on the period before signal period
TradeATR = Asset1['ATR'].shift(1).loc[Asset1.index == TradeSubset.index[0]][0]
#Volatility position sizing based on nominal risk and market volatility; round down shares!!!
#Does not account for slippage / market impact..
numshares = (DollarRisk)/((TradeATR * numATR))//1
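        #Worked example (illustrative numbers only): with Equity = 1e11 and RiskPerTrade = .005,
        #DollarRisk = 5e8; if TradeATR = 2.0 and numATR = 1.5 the stop sits 3.0 points away,
        #so numshares = 5e8 / 3.0 // 1 = 166,666,666 shares and a full stop-out loses roughly DollarRisk.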
#1 = long; -1 = short
TradeDirection = TradeSubset['Signal'][0]
#For long trades
if TradeDirection == 1:
#Establish non-gap entry price based on previous period rolling max
EntryPrice = Asset1['RollingMax'].shift(1).loc[Asset1.index == TradeSubset.index[0]][0]
#Check for entry gap; Open higher than previous period rolling max
#If gap then buy on open
if TradeSubset['Open'][0] > EntryPrice:
#Enter on open
EntryPrice = TradeSubset['Open'][0]
#Record gap entry
GapEntry = 1
#Add slippage or market impact
EntryPrice = EntryPrice # + .01
#Assign to TradeSubset
TradeSubset['EntryPriceUnitOne'][0] = EntryPrice
#Calculate initial stop based on Direction and ATR
TradeSubset['StopPriceUnitOne'][0] = EntryPrice - (numATR * TradeATR)
#Forward fill stop
TradeSubset['StopPriceUnitOne'] = TradeSubset['StopPriceUnitOne'].ffill(
limit = (stopwindow - 1))
#Add the trailing highest lows
TradeSubset['StopPriceUnitOne'] = TradeSubset['StopPriceUnitOne'].fillna(TradeSubset['RollingLongStop'])
#We want the cumulative maximum; thus preventing the stop from moving down.
TradeSubset['StopPriceUnitOne'] = TradeSubset['StopPriceUnitOne'].cummax()
#For every day the trade is open
for r in range(len(TradeSubset['StopPriceUnitOne'])):
#If low is lower than stop price
if TradeSubset['Low'].iloc[r] < TradeSubset['StopPriceUnitOne'].shift(1).iloc[r]:
#Record index of exit and get back to it
IndexOfExit = TradeSubset.index[r]
break
#Establish non-gap exit price based on previous period stop
try:
ExitPrice = TradeSubset['StopPriceUnitOne'].shift(1).loc[TradeSubset.index == IndexOfExit][0]
#Except if there is an open trade then no IndexOfExit exists
except IndexError:
break
#Check Open for exit gap through stop
if TradeSubset['Open'].loc[IndexOfExit] < ExitPrice:
#Record open price instead of stop price
ExitPrice = TradeSubset['Open'].loc[IndexOfExit]
#Record exit gap
GapExit = 1
#Calculate returns - slippage and commission go here.
TradePercentReturn = (ExitPrice - EntryPrice) / ((RiskPerTrade ** -1) * DollarRisk)
TradeDollarReturn = (ExitPrice - EntryPrice) * numshares
#For short trades
if TradeDirection == -1:
#Establish non-gap entry price based on previous period rolling max
EntryPrice = Asset1['RollingMin'].shift(1).loc[Asset1.index == TradeSubset.index[0]][0]
#Check for entry gap; Open higher than previous period rolling max
#If gap then buy on open
if TradeSubset['Open'][0] < EntryPrice:
#Record open price instead of stop price
EntryPrice = TradeSubset['Open'][0]
#Record gap entry
GapEntry = 1
#Add slippage or market impact
EntryPrice = EntryPrice # - .01
#Assign to TradeSubset
TradeSubset['EntryPriceUnitOne'][0] = EntryPrice
#Calculate initial stop based on Direction and ATR
TradeSubset['StopPriceUnitOne'][0] = EntryPrice + (numATR * TradeATR)
#Forward fill stop
TradeSubset['StopPriceUnitOne'] = TradeSubset['StopPriceUnitOne'].ffill(
limit = (stopwindow - 1))
#Add the trailing lowest highs
TradeSubset['StopPriceUnitOne'] = TradeSubset['StopPriceUnitOne'].fillna(TradeSubset['RollingShortStop'])
#We want the cumulative maximum; thus preventing the stop from moving down.
TradeSubset['StopPriceUnitOne'] = TradeSubset['StopPriceUnitOne'].cummin()
#For every day the trade is open
for r in range(len(TradeSubset['StopPriceUnitOne'])):
#If high is higher than stop price
if TradeSubset['High'].iloc[r] > TradeSubset['StopPriceUnitOne'].shift(1).iloc[r]:
#Record index of exit and get back to it
IndexOfExit = TradeSubset.index[r]
break
#Establish non-gap exit price based on previous period stop
try:
ExitPrice = TradeSubset['StopPriceUnitOne'].shift(1).loc[TradeSubset.index == IndexOfExit][0]
except IndexError:
break
#Check Open for gap through stop
if TradeSubset['Open'].loc[IndexOfExit] > ExitPrice:
#Record open price instead of stop price
ExitPrice = TradeSubset['Open'].loc[IndexOfExit]
#Record gap exit
GapExit = 1
TradePercentReturn = (EntryPrice - ExitPrice) / ((RiskPerTrade ** -1) * DollarRisk)
TradeDollarReturn = (EntryPrice - ExitPrice) * numshares
#If there is no exit signal, the trade is still open..
if math.isnan(ExitPrice) == True:
break
LengthOfTrade = TradeSubset['SubIndex'].loc[IndexOfExit] - TradeSubset['SubIndex'].loc[IndexOfEntry]
        #The SubIndex of the exit date is used to continue looking for re-entry in the new trade subset
SubIndexOfExit = TradeSubset['SubIndex'].loc[IndexOfExit]
#R Multiple calculation, return based on initial risk
RMultiple = TradeDollarReturn / DollarRisk
#Log individual trade details in the Trade dataframe
Empty.append(TradeDirection)
Empty.append(IndexOfEntry)
Empty.append(IndexOfExit)
Empty.append(SubIndexOfExit)
Empty.append(LengthOfTrade)
Empty.append(GapEntry)
Empty.append(GapExit)
Empty.append(EntryPrice)
Empty.append(ExitPrice)
Empty.append(numshares)
Empty.append(TradeATR)
Empty.append(round(TradePercentReturn, 2))
Empty.append(TradeDollarReturn)
Empty.append(DollarRisk)
Empty.append(RMultiple)
#List to series
        Emptyseries = pd.Series(Empty)
import itertools
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
# from https://tidyr.tidyverse.org/reference/complete.html
df = pd.DataFrame(
{
"group": [1, 2, 1],
"item_id": [1, 2, 2],
"item_name": ["a", "b", "b"],
"value1": [1, 2, 3],
"value2": [4, 5, 6],
}
)
columns = [
["group", "item_id", "item_name"],
["group", ("item_id", "item_name")],
]
expected_output = [
pd.DataFrame(
{
"group": [1, 1, 1, 1, 2, 2, 2, 2],
"item_id": [1, 1, 2, 2, 1, 1, 2, 2],
"item_name": ["a", "b", "a", "b", "a", "b", "a", "b"],
"value1": [1.0, np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 2.0],
"value2": [4.0, np.nan, np.nan, 6.0, np.nan, np.nan, np.nan, 5.0],
}
),
pd.DataFrame(
{
"group": [1, 1, 2, 2],
"item_id": [1, 2, 1, 2],
"item_name": ["a", "b", "a", "b"],
"value1": [1.0, 3.0, np.nan, 2.0],
"value2": [4.0, 6.0, np.nan, 5.0],
}
),
]
complete_parameters = [
(dataframe, columns, output)
for dataframe, (columns, output) in itertools.product(
[df], zip(columns, expected_output)
)
]
@pytest.mark.parametrize("df,columns,output", complete_parameters)
def test_complete(df, columns, output):
"""Test the complete function, with and without groupings."""
assert_frame_equal(df.complete(columns), output)
# from http://imachordata.com/2016/02/05/you-complete-me/
@pytest.fixture
def df1():
return pd.DataFrame(
{
"Year": [1999, 2000, 2004, 1999, 2004],
"Taxon": [
"Saccharina",
"Saccharina",
"Saccharina",
"Agarum",
"Agarum",
],
"Abundance": [4, 5, 2, 1, 8],
}
)
def test_fill_value(df1):
"""Test fill_value argument."""
output1 = pd.DataFrame(
{
"Year": [1999, 1999, 2000, 2000, 2004, 2004],
"Taxon": [
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
],
"Abundance": [1, 4.0, 0, 5, 8, 2],
}
)
result = df1.complete(
columns=["Year", "Taxon"], fill_value={"Abundance": 0}
)
assert_frame_equal(result, output1)
def test_fill_value_all_years(df1):
"""
Test the complete function accurately replicates for all the years
from 1999 to 2004.
"""
output1 = pd.DataFrame(
{
"Year": [
1999,
1999,
2000,
2000,
2001,
2001,
2002,
2002,
2003,
2003,
2004,
2004,
],
"Taxon": [
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
],
"Abundance": [1.0, 4, 0, 5, 0, 0, 0, 0, 0, 0, 8, 2],
}
)
result = df1.complete(
columns=[
{"Year": range(df1.Year.min(), df1.Year.max() + 1)},
"Taxon",
],
fill_value={"Abundance": 0},
)
assert_frame_equal(result, output1)
def test_type_columns(df1):
"""Raise error if columns is not a list object."""
with pytest.raises(TypeError):
df1.complete(columns="Year")
def test_empty_columns(df1):
"""Raise error if columns is empty"""
with pytest.raises(ValueError):
df1.complete(columns=[])
def test_fill_value_is_a_dict(df1):
"""Raise error if fill_value is not a dictionary"""
with pytest.raises(TypeError):
df1.complete(columns=["Year", "Taxon"], fill_value=0)
frame = pd.DataFrame(
{
"Year": [1999, 2000, 2004, 1999, 2004],
"Taxon": [
"Saccharina",
"Saccharina",
"Saccharina",
"Agarum",
"Agarum",
],
"Abundance": [4, 5, 2, 1, 8],
}
)
wrong_columns = (
(frame, ["b", "Year"]),
(frame, [{"Yayay": range(7)}]),
(frame, ["Year", ["Abundant", "Taxon"]]),
(frame, ["Year", ("Abundant", "Taxon")]),
)
empty_sub_columns = [
(frame, ["Year", []]),
(frame, ["Year", {}]),
(frame, ["Year", ()]),
(frame, ["Year", set()]),
]
@pytest.mark.parametrize("frame,wrong_columns", wrong_columns)
def test_wrong_columns(frame, wrong_columns):
"""Test that KeyError is raised if wrong column is supplied."""
with pytest.raises(KeyError):
frame.complete(columns=wrong_columns)
@pytest.mark.parametrize("frame,empty_sub_cols", empty_sub_columns)
def test_empty_subcols(frame, empty_sub_cols):
"""Raise ValueError for an empty container in columns'"""
with pytest.raises(ValueError):
frame.complete(columns=empty_sub_cols)
# https://stackoverflow.com/questions/32874239/
# how-do-i-use-tidyr-to-fill-in-completed-rows-within-each-value-of-a-grouping-var
def test_grouping_first_columns():
"""Test complete function when the first entry in columns is
a grouping."""
df2 = pd.DataFrame(
{
"id": [1, 2, 3],
"choice": [5, 6, 7],
"c": [9.0, np.nan, 11.0],
"d": [
pd.NaT,
pd.Timestamp("2015-09-30 00:00:00"),
pd.Timestamp("2015-09-29 00:00:00"),
],
}
)
output2 = pd.DataFrame(
{
"id": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"c": [9.0, 9.0, 9.0, np.nan, np.nan, np.nan, 11.0, 11.0, 11.0],
"d": [
pd.NaT,
pd.NaT,
pd.NaT,
pd.Timestamp("2015-09-30 00:00:00"),
pd.Timestamp("2015-09-30 00:00:00"),
pd.Timestamp("2015-09-30 00:00:00"),
pd.Timestamp("2015-09-29 00:00:00"),
                pd.Timestamp("2015-09-29 00:00:00"),
from flask import Flask, render_template, request, redirect, url_for, session
import os
import io
import math
from math import sin, cos, sqrt, atan2, radians
from collections import defaultdict
import numpy as np
import pandas as pd
import pymysql
import pymysql.cursors
from pandas.io import sql
from sqlalchemy import create_engine  # required by the create_engine()/to_sql() calls below
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import scipy.optimize as optimize
import scipy.stats as st
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.tsa.arima_model import ARIMA
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from pulp import *
#from werkzeug.utils import secure_filename
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
            changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])  # row i, not the hard-coded row 1
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
        x = spq['Product_Price']
        y = spq['Product_Qty']
        num_bins = 5
        # The original plt.hist() calls were commented out, which left pint/dint empty
        # and made the price/demand grid degenerate; np.histogram() yields the same
        # bin edges without drawing a figure.
        _, pint = np.histogram(x, num_bins)
        _, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
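        #The fitted model, written out (coefficients taken from res.params above):
        #  Qty = intercept + b1*(Price - CompPrice) + b2*Promo1 + b3*Promo2 + b4*log(week index)
        #where b1..b4 are diffpriceprodvscomp_param, promo1_param, promo2_param and week_param.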
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
                # Predicted demand from the fitted OLS model; the week term now uses the
                # fitted coefficient week_param (the source multiplied log(week) by the raw
                # week index instead).
                function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
                          (diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
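        # Formulation solved below (sketch): choose prices p_1..p_T to maximise revenue
        #   sum_t p_t * demand(p_t)   (SLSQP minimises, hence the -1 factor in objective)
        # subject to sum_t demand(p_t) <= s_0 (inventory) and 0 <= p_t <= price_max.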
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible to due Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
        result = pd.concat([deer, dear], axis=1).reindex(dear.index)  # join_axes was removed from pandas.concat
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
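    # Worked example (illustrative coordinates only): for two points 1 degree of latitude
    # apart at the same longitude, e.g. (12.0, 77.0) and (13.0, 77.0), dlat = pi/180 and
    # distance ~= 6373 * pi/180 ~= 111.2 km, which is then scaled by the per-km cost.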
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity] + 5000000*cap_slack[cust] for cust in Demand)
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
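    # The MILP above (sketch): minimise sum_{f,c} DelCost[f,c]*x[f,c] + sum_f FixedCost[f]*y[f]
    # + 5e6 * sum_c slack[c], subject to sum_f x[f,c] + slack[c] == Demand[c] for every customer c
    # and sum_c x[f,c] <= Capacity[f]*y[f] for every factory f, with y[f] binary and x, slack integer.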
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
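# At this point every date has been bucketed to the first month of its quarter, so the groupby
# above sums all numeric columns per quarter before the result is written to the database.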
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weakly': ##weakly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
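# Note: this MAPE variant divides by the prediction rather than the actual value.
# Illustrative (hypothetical) numbers: MAPE([100, 200], [110, 190]) = mean(10/110, 10/190)*100 ~ 7.2%.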
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
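# Despite the name, this 'moving average' forecast is produced by fitting an ARIMA(0,0,1) model
# (i.e. an MA(1) process) to each of the first m-5 columns and predicting from the last training
# date up to the user-selected horizon; error metrics are recorded for the first column (TotalDemand) only.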
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
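# order=(1,0,0) corresponds to a pure AR(1) model fitted per column, mirroring the structure of mavg above.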
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
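# Each series is regressed on its integer time index (0, 1, 2, ...) and the fitted line is
# extrapolated noofterms periods ahead; accuracy is measured against the 30% hold-out set V.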
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting the list of attributes (column names) in the data frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the regression separately for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model on the validation set
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
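# exponential_smoothing implements s_t = alpha*y_t + (1-alpha)*s_(t-1) over the history, then keeps
# re-applying the same update to the last smoothed/actual pair, so the out-of-sample forecast
# converges to a constant level rather than extrapolating a trend.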
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],alpha,predicterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the quarterly data and index it by time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Quarterly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting the list of attributes (column names) in the data frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the regression separately for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model on the validation set
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],alpha,predicterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the yearly data and index it by time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Yearly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting the list of attributes (column names) in the data frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the regression separately for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model on the validation set
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],alpha,predicterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
import os
import sys
sys.path.append('../')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from absl import flags, app
from tensorflow_probability.python.internal.samplers import split_seed
from tqdm import tqdm
from filterflow.base import State
from filterflow.models.optimal_proposal_linear_gaussian import make_filter, make_optimal_filter
from filterflow.resampling import MultinomialResampler, SystematicResampler, StratifiedResampler, RegularisedTransform
from filterflow.resampling.criterion import NeverResample, AlwaysResample, NeffCriterion
from filterflow.resampling.differentiable import PartiallyCorrectedRegularizedTransform
from filterflow.resampling.differentiable.loss import SinkhornLoss
from filterflow.resampling.differentiable.optimized import OptimizedPointCloud
from filterflow.resampling.differentiable.optimizer.sgd import SGD
import pickle
from scripts.optimal_proposal_common import get_data, ResamplingMethodsEnum, get_observation_matrix, \
get_observation_covariance, get_transition_covariance, get_transition_matrix
def pickle_obj(obj, file_path):
with open(file_path, 'wb') as handle:
pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
@tf.function
def routine(pf, initial_state, observations_dataset, T, log_phi_x, phi_y, seed):
with tf.GradientTape() as tape:
tape.watch([log_phi_x, phi_y])
final_state = pf(initial_state, observations_dataset, T, seed=seed, return_final=True)
res = -tf.reduce_mean(final_state.log_likelihoods)
return res, tape.gradient(res, [log_phi_x, phi_y]), tf.reduce_mean(final_state.ess)
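# routine runs the particle filter once and returns the negative mean log-likelihood (the loss being
# minimised), its gradients w.r.t. the proposal parameters log_phi_x and phi_y, and the average ESS.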
def get_gradient_descent_function():
# This is a trick because tensorflow doesn't allow you to create variables inside a decorated function
@tf.function
def gradient_descent(pf, initial_state, observations_dataset, T, n_iter, optimizer, log_phi_x, phi_y,
initial_values, change_seed, seed):
variables = [log_phi_x, phi_y]
reset_operations = [k.assign(v) for k, v in zip(variables, initial_values)]
loss = tf.TensorArray(dtype=tf.float32, size=n_iter, dynamic_size=False)
ess = tf.TensorArray(dtype=tf.float32, size=n_iter, dynamic_size=False)
filter_seed, seed = split_seed(seed, n=2, salt='gradient_descent')
with tf.control_dependencies(reset_operations):
for i in tf.range(n_iter):
loss_value, grads, average_ess = routine(pf, initial_state, observations_dataset, T, log_phi_x, phi_y,
seed)
if change_seed:
filter_seed, seed = split_seed(filter_seed, n=2)
loss = loss.write(tf.cast(i, tf.int32), loss_value)
ess = ess.write(tf.cast(i, tf.int32), average_ess)
grads = [tf.clip_by_value(grad, -100., 100.) for grad in grads]
optimizer.apply_gradients(zip(grads, variables))
tf.print('\rStep', i, '/', n_iter, end='')
return [tf.convert_to_tensor(var) for var in variables], loss.stack(), ess.stack()
return gradient_descent
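# Gradients are clipped to [-100, 100] before each optimizer step; change_seed controls whether a
# fresh filter seed is drawn at every iteration (stochastic objective) or the same seed is reused
# (fixed-sample objective).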
def compare_learning_rates(pf, initial_state, observations_dataset, T, log_phi_x, phi_y, initial_values,
n_iter, optimizer_maker, learning_rates, filter_seed, use_xla, change_seed):
loss_profiles = []
ess_profiles = []
for learning_rate in tqdm(learning_rates):
optimizer = optimizer_maker(learning_rate=learning_rate)
gradient_descent_function = get_gradient_descent_function()
final_variables, loss_profile, ess_profile = gradient_descent_function(pf, initial_state, observations_dataset,
T, n_iter,
optimizer, log_phi_x, phi_y,
initial_values, change_seed, filter_seed)
loss_profiles.append(-loss_profile.numpy() / T)
ess_profiles.append(ess_profile.numpy())
return loss_profiles, ess_profiles
def plot_losses_vs_ess(loss_profiles_df, ess_profiles_df, filename, savefig, dx, dy, dense, T, n_particles, change_seed,
batch_size, optimal_filter_val, kalman_val, n_iter, mse_table, n_data):
fig, ax = plt.subplots(figsize=(5, 3))
loss_profiles_df.style.float_format = '${:,.1f}'.format
loss_profiles_df.plot(ax=ax, legend=False)
ax.axhline(y=optimal_filter_val, color="k", linestyle=':')
ax.axhline(y=kalman_val, color="k")
ax.set_xlim(0, n_iter)
ax1 = ax.twinx()
ess_profiles_df.plot.area(ax=ax1, legend=False, linestyle='--', alpha=0.33, stacked=False)
# ax.set_ylim(-2.5, -1.7)
ax1.set_ylim(1, n_particles)
csv_fp = os.path.join('./charts/',
f'global_variational_different_loss_df_lr_loss_{filename}_dx_{dx}_dy_{dy}_dense_{dense}_T_{T}_change_seed_{change_seed}.csv')
loss_profiles_df.to_csv(csv_fp)
csv_fp = os.path.join('./charts/',
f'global_variational_different_ess_df_lr_loss_{filename}_dx_{dx}_dy_{dy}_dense_{dense}_T_{T}_change_seed_{change_seed}.csv')
ess_profiles_df.to_csv(csv_fp)
# ax.legend()
fig.tight_layout()
filename = f'global_variational_different_lr_loss_ess_{filename}_N_{n_particles}_dx_{dx}_dy_{dy}_dense_{dense}_T_{T}_change_seed_{change_seed}_batch_size_{batch_size}_ndata_{n_data}'
if savefig:
fig.savefig(os.path.join('./charts/',
filename + '.png'))
mse_table.to_csv(os.path.join('./tables/', filename + '.csv'),
float_format='%.5f')
else:
print(mse_table)
fig.suptitle(f'variational_different_loss_ess_{filename}_dx_{dx}_dy_{dy}_dense_{dense}_T_{T}')
plt.show()
def plot_variables(variables_df, filename, savefig):
fig, ax = plt.subplots(figsize=(5, 5))
variables_df.plot(ax=ax)
fig.tight_layout()
if savefig:
fig.savefig(os.path.join('./charts/', f'global_variational_different_lr_variables_{filename}.png'))
else:
fig.suptitle(f'variational_different_lr_variables_{filename}')
plt.show()
def resampling_method_factory(resampling_method_enum, resampling_kwargs):
if resampling_method_enum == ResamplingMethodsEnum.MULTINOMIAL:
resampling_method = MultinomialResampler()
elif resampling_method_enum == ResamplingMethodsEnum.SYSTEMATIC:
resampling_method = SystematicResampler()
elif resampling_method_enum == ResamplingMethodsEnum.STRATIFIED:
resampling_method = StratifiedResampler()
elif resampling_method_enum == ResamplingMethodsEnum.REGULARIZED:
resampling_method = RegularisedTransform(**resampling_kwargs)
elif resampling_method_enum == ResamplingMethodsEnum.VARIANCE_CORRECTED:
regularized_resampler = RegularisedTransform(**resampling_kwargs)
resampling_method = PartiallyCorrectedRegularizedTransform(regularized_resampler)
elif resampling_method_enum == ResamplingMethodsEnum.OPTIMIZED:
lr = resampling_kwargs.pop('lr', resampling_kwargs.pop('learning_rate', 0.1))
loss = SinkhornLoss(**resampling_kwargs, symmetric=True)
optimizer = SGD(loss, lr=lr, decay=0.95)
regularized_resampler = RegularisedTransform(**resampling_kwargs)
resampling_method = OptimizedPointCloud(optimizer, intermediate_resampler=regularized_resampler)
else:
raise ValueError(f'resampling_method_name {resampling_method_enum} is not a valid ResamplingMethodsEnum')
return resampling_method
def main(resampling_method_value, resampling_neff, learning_rates=(1e-4, 1e-3), resampling_kwargs=None,
alpha=0.42, dx=10, dy=3, observation_covariance=1., dense=False, T=20, batch_size=1, n_particles=25,
data_seed=0, n_data=50, n_iter=50, savefig=False, filter_seed=0, use_xla=False, change_seed=True):
transition_matrix = get_transition_matrix(alpha, dx)
transition_covariance = get_transition_covariance(dx)
observation_matrix = get_observation_matrix(dx, dy, dense)
observation_covariance = get_observation_covariance(observation_covariance, dy)
resampling_method_enum = ResamplingMethodsEnum(resampling_method_value)
np_random_state = np.random.RandomState(seed=data_seed)
observation_matrix = tf.convert_to_tensor(observation_matrix)
transition_covariance_chol = tf.linalg.cholesky(transition_covariance)
observation_covariance_chol = tf.linalg.cholesky(observation_covariance)
initial_particles = np_random_state.normal(0., 1., [batch_size, n_particles, dx]).astype(np.float32)
initial_state = State(initial_particles)
if resampling_neff == 0.:
resampling_criterion = NeverResample()
elif resampling_neff == 1.:
resampling_criterion = AlwaysResample()
else:
resampling_criterion = NeffCriterion(resampling_neff, True)
optimal_smc = make_optimal_filter(observation_matrix, transition_matrix, observation_covariance_chol,
transition_covariance_chol, MultinomialResampler(), resampling_criterion)
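# The filter with the (locally) optimal proposal is run on every simulated dataset below to obtain
# the reference log-likelihood level drawn as the dotted horizontal line in the plots.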
if resampling_kwargs is None:
resampling_kwargs = {}
resampling_method = resampling_method_factory(resampling_method_enum, resampling_kwargs)
datas = []
lls = []
observation_datasets = []
optimal_lls = []
log_phi_x_0 = tf.ones(dx)
phi_y_0 = tf.zeros(dy)
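# log_phi_x and phi_y parametrise the learned proposal; they are reset to these initial values for
# every learning rate via `initial_values` inside gradient_descent.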
for _ in range(n_data):
data, ll = get_data(transition_matrix, observation_matrix, transition_covariance, observation_covariance, T,
np_random_state)
datas.append(data)
lls.append(ll / T)
observation_dataset = tf.data.Dataset.from_tensor_slices(data)
observation_datasets.append(observation_dataset)
final_state = optimal_smc(initial_state, observation_dataset, T, None, True, filter_seed)
optimal_lls.append(final_state.log_likelihoods.numpy().mean() / T)
log_phi_x = tf.Variable(log_phi_x_0, trainable=True)
phi_y = tf.Variable(phi_y_0, trainable=True)
smc = make_filter(observation_matrix, transition_matrix, observation_covariance_chol,
transition_covariance_chol, resampling_method, resampling_criterion,
log_phi_x, phi_y)
def optimizer_maker(learning_rate):
# tf.function doesn't like creating variables. This is a way to create them outside the graph
# We can't reuse the same optimizer because it would be giving a warmed-up momentum to the ones run later
optimizer = tf.optimizers.Adam(learning_rate=learning_rate)
return optimizer
initial_values = [log_phi_x_0, phi_y_0]
losses_list = []
ess_profiles_list = []
mean_errors = []
for observation_dataset in observation_datasets:
try:
losses, ess_profiles = compare_learning_rates(smc, initial_state, observation_dataset, T, log_phi_x, phi_y,
initial_values, n_iter, optimizer_maker, learning_rates,
filter_seed,
use_xla, change_seed)
except Exception:
print('one dataset failed, ignoring')
continue
losses_df = pd.DataFrame(np.stack(losses).T, columns=np.log10(learning_rates))
ess_df = pd.DataFrame(np.stack(ess_profiles).T, columns=np.log10(learning_rates))
losses_df.columns.name = 'log learning rate'
losses_df.columns.epoch = 'epoch'
ess_df.columns.name = 'log learning rate'
ess_df.columns.epoch = 'epoch'
losses_list.append(losses_df)
ess_profiles_list.append(ess_df)
delta_phi_m_1 = tf.linalg.diag(tf.exp(-log_phi_x))
diff_cov = optimal_smc._proposal_model._sigma - delta_phi_m_1 @ transition_covariance
approx_error = tf.linalg.diag_part(diff_cov).numpy()
mean_error = np.sqrt(np.nanmean(approx_error ** 2))
mean_errors.append(mean_error)
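# mean_error is the RMS of the diagonal difference between the optimal proposal covariance and the
# transition covariance rescaled by the learned diag(exp(-log_phi_x)) factors.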
losses_data = pd.concat(losses_list, axis=1)
ess_data = pd.concat(ess_profiles_list, axis=1)
mean_data = pd.DataFrame([[np.mean(mean_errors)]], index=pd.MultiIndex.from_tuples([(batch_size, n_particles)]))
import os
import glob
import pathlib
import re
import base64
import pandas as pd
from datetime import datetime, timedelta
# https://www.pythonanywhere.com/forums/topic/29390/ for measuring the RAM usage on pythonanywhere
class defichainAnalyticsModelClass:
def __init__(self):
workDir = os.path.abspath(os.getcwd())
self.dataPath = workDir[:-9] + '/data/'
# data for controller/views
self.dailyData = pd.DataFrame()
self.hourlyData = pd.DataFrame()
self.minutelyData = pd.DataFrame()
self.lastRichlist = None
self.snapshotData = None
self.changelogData = None
# last update of csv-files
self.updated_nodehubIO = None
self.updated_allnodes = None
self.updated_extractedRichlist = None
self.updated_tradingData = None
self.updated_blocktime = None
self.updated_dexHourly = None
self.update_dexMinutely = None
self.updated_daa = None
self.updated_LastRichlist = None
self.updated_dexVolume = None
self.updated_tokenCryptos = None
self.updated_twitterData = None
self.updated_twitterFollower = None
self.update_snapshotData = None
self.update_changelogData = None
self.update_incomeVisits = None
self.update_portfolioDownloads = None
self.update_promoDatabase = None
self.update_analyticsVisits = None
self.updated_hourlyDEXTrades = None
self.update_MNmonitor = None
self.updated_dfx = None
self.update_DFIsignal = None
# background image for figures
with open(workDir + "/assets/analyticsLandscapeGrey2.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode()
self.figBackgroundImage = "data:image/png;base64," + encoded_string # Add the prefix that plotly will want when using the string as source
#### DAILY DATA #####
def loadDailyData(self):
self.loadHourlyDEXdata()
self.loadDEXVolume()
self.loadDailyTradingData()
self.loadExtractedRichlistData()
self.calcOverallTVLdata()
self.loadDailyBlocktimeData()
self.loadDAAData()
self.loadTwitterData()
self.loadTwitterFollowerData()
self.loadIncomeVisitsData()
self.loadPortfolioDownloads()
self.loadPromoDatabase()
self.loadMNMonitorDatabase()
self.loadAnalyticsVisitsData()
self.loadDFIsignalDatabase()
def loadMNnodehub(self):
print('>>>> Start update nodehub.IO data ... <<<<')
filePath = self.dataPath + 'mnNodehub.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_nodehubIO:
nodehubData = pd.read_csv(filePath, index_col=0)
nodehubData.rename(columns={"amount": "nbMNNodehub"}, inplace=True)
ind2Delete = self.dailyData.columns.intersection(nodehubData.columns)
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(nodehubData['nbMNNodehub'], how='outer', left_index=True, right_index=True)
self.updated_nodehubIO = fileInfo.stat()
print('>>>> nodehub data loaded from csv-file <<<<')
def loadMNAllnodes(self):
print('>>>> Start update allnodes data ... <<<<')
filePath = self.dataPath + 'mnAllnodes.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_allnodes:
allnodesData = pd.read_csv(filePath, index_col=0)
allnodesData.set_index('date', inplace=True)
ind2Delete = self.dailyData.columns.intersection(allnodesData.columns)
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(allnodesData['nbMNAllnode'], how='outer', left_index=True, right_index=True)
self.updated_allnodes = fileInfo.stat()
print('>>>> allnodes data loaded from csv-file <<<<')
def loadExtractedRichlistData(self):
        self.loadMNnodehub()  # masternodes hosted by Nodehub must be loaded here to ensure correct values for the 'other' and relative representations
        self.loadMNAllnodes()  # masternodes hosted by Allnodes must be loaded here to ensure correct values for the 'other' and relative representations
print('>>>> Start update extracted richlist data ... <<<<')
filePath = self.dataPath + 'extractedDFIdata.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_extractedRichlist:
extractedRichlist = pd.read_csv(filePath, index_col=0)
ind2Delete = self.dailyData.columns.intersection(extractedRichlist.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(extractedRichlist, how='outer', left_index=True, right_index=True) # add new columns to daily table
self.dailyData['nbMNOther'] = self.dailyData['nbMnId']-self.dailyData['nbMnCakeId']-self.dailyData['nbMydefichainId']-self.dailyData['nbMNNodehub'].fillna(0)-self.dailyData['nbMNAllnode'].fillna(0)
self.dailyData['nbMNnonCake'] = self.dailyData['nbMnId']-self.dailyData['nbMnCakeId']
self.dailyData['nbMnCakeIdRelative'] = self.dailyData['nbMnCakeId']/self.dailyData['nbMnId']*100
self.dailyData['nbMNOtherRelative'] = self.dailyData['nbMNOther'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMydefichainRelative'] = self.dailyData['nbMydefichainId'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNNodehubRelative'] = self.dailyData['nbMNNodehub'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNAllnodeRelative'] = self.dailyData['nbMNAllnode'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNlocked10Relative'] = self.dailyData['nbMNlocked10'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNlocked5Relative'] = self.dailyData['nbMNlocked5'] / self.dailyData['nbMnId'] * 100
# extracting DFI in Liquidity-Mining
lmCoins = pd.DataFrame(index=self.dailyData.index)
lmCoins['BTC_pool'] = self.hourlyData.groupby('Date')['BTC-DFI_reserveB'].first()
lmCoins['ETH_pool'] = self.hourlyData.groupby('Date')['ETH-DFI_reserveB'].first()
lmCoins['USDT_pool'] = self.hourlyData.groupby('Date')['USDT-DFI_reserveB'].first()
lmCoins['DOGE_pool'] = self.hourlyData.groupby('Date')['DOGE-DFI_reserveB'].first()
lmCoins['LTC_pool'] = self.hourlyData.groupby('Date')['LTC-DFI_reserveB'].first()
lmCoins['USDC_pool'] = self.hourlyData.groupby('Date')['USDC-DFI_reserveB'].first()
lmCoins['overall'] = lmCoins['BTC_pool'] + lmCoins['ETH_pool'] + lmCoins['USDT_pool'] + lmCoins['DOGE_pool'].fillna(0) + lmCoins['LTC_pool'].fillna(0) + lmCoins['USDC_pool'] .fillna(0)
self.dailyData['lmDFI'] = lmCoins['overall']
# sum of addresses and DFI
self.dailyData['nbOverall'] = self.dailyData['nbMnId'] + self.dailyData['nbOtherId']
self.dailyData['circDFI'] = self.dailyData['mnDFI'] + self.dailyData['otherDFI'] \
+ self.dailyData['tokenDFI'].fillna(0) + self.dailyData['lmDFI'].fillna(0) + self.dailyData['erc20DFI'].fillna(0) \
- (self.dailyData['nbMNlocked10']+self.dailyData['nbMNlocked5']).fillna(0)*20000
self.dailyData['totalDFI'] = self.dailyData['circDFI'] + self.dailyData['fundDFI'] + self.dailyData['foundationDFI'].fillna(0) \
+ self.dailyData['burnedDFI'].fillna(method="ffill") + (self.dailyData['nbMNlocked10']+self.dailyData['nbMNlocked5']).fillna(0)*20000
# calc market cap data in USD and BTC
print('>>>>>>>> Update market cap in loadExtractedRichlistData... <<<<<<<<')
self.dailyData['marketCapUSD'] = self.dailyData['circDFI']*self.dailyData['DFIPriceUSD']
self.dailyData['marketCapBTC'] = self.dailyData['marketCapUSD'] / self.dailyData['BTCPriceUSD']
# calculate daily change in addresses and DFI amount
self.dailyData['diffDate'] = pd.to_datetime(self.dailyData.index).to_series().diff().values
self.dailyData['diffDate'] = self.dailyData['diffDate'].fillna(pd.Timedelta(seconds=0)) # set nan-entry to timedelta 0
self.dailyData['diffDate'] = self.dailyData['diffDate'].apply(lambda x: float(x.days))
self.dailyData['diffNbOther'] = self.dailyData['nbOtherId'].diff() / self.dailyData['diffDate']
self.dailyData['diffNbMN'] = self.dailyData['nbMnId'].diff() / self.dailyData['diffDate']
self.dailyData['diffNbNone'] = None
self.dailyData['diffotherDFI'] = self.dailyData['otherDFI'].diff() / self.dailyData['diffDate']
self.dailyData['diffmnDFI'] = self.dailyData['mnDFI'].diff() / self.dailyData['diffDate']
self.dailyData['difffundDFI'] = self.dailyData['fundDFI'].diff() / self.dailyData['diffDate']
self.dailyData['difffoundationDFI'] = self.dailyData['foundationDFI'].diff() / self.dailyData['diffDate']
self.dailyData['diffLMDFI'] = self.dailyData['lmDFI'].diff() / self.dailyData['diffDate']
self.updated_extractedRichlist = fileInfo.stat()
print('>>>> Richlist data loaded from csv-file <<<<')
def calcOverallTVLdata(self):
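        # TVL from masternodes: each node counts 1,000,000 DFI before 2021-03-02 and 20,000 DFI afterwards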
self.dailyData['tvlMNDFI'] = self.dailyData['nbMnId'] * ((pd.to_datetime(self.dailyData.index)<pd.Timestamp('2021-03-02')) * 1 * 1000000 + \
(pd.to_datetime(self.dailyData.index)>=pd.Timestamp('2021-03-02')) * 1 * 20000)
dexLockedDFI = (self.hourlyData['BTC-DFI_lockedDFI']+self.hourlyData['ETH-DFI_lockedDFI']+self.hourlyData['USDT-DFI_lockedDFI'] +
self.hourlyData['DOGE-DFI_lockedDFI'].fillna(0)+self.hourlyData['LTC-DFI_lockedDFI'].fillna(0) +
self.hourlyData['BCH-DFI_lockedDFI'].fillna(0) + self.hourlyData['USDC-DFI_lockedDFI'].fillna(0))
dexLockedDFI.index = dexLockedDFI.index.floor('D').astype(str) # remove time information, only date is needed
self.dailyData['tvlDEXDFI'] = dexLockedDFI.groupby(level=0).first()
def loadDailyTradingData(self):
print('>>>> Start update trading data ... <<<<')
filePath = self.dataPath + 'dailyTradingResultsDEX.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_tradingData:
dailyTradingResults = pd.read_csv(self.dataPath+'dailyTradingResultsDEX.csv',index_col=0)
ind2Delete = self.dailyData.columns.intersection(dailyTradingResults.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyTradingResults, how='outer', left_index=True, right_index=True) # add new columns to daily table
# calc market cap data in USD and BTC (same as in loadExtractedRichlistData to get updated price information
if 'circDFI' in self.dailyData.columns:
print('>>>>>>>> Update market cap in loadDailyTradingData... <<<<<<<<')
self.dailyData['marketCapUSD'] = self.dailyData['circDFI']*self.dailyData['DFIPriceUSD']
self.dailyData['marketCapBTC'] = self.dailyData['marketCapUSD'] / self.dailyData['BTCPriceUSD']
self.updated_tradingData = fileInfo.stat()
print('>>>> Trading data loaded from csv-file <<<<')
def loadDailyBlocktimeData(self):
print('>>>> Start update blocktime data ... <<<<')
filePath = self.dataPath + 'BlockListStatistics.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_blocktime:
dailyBlocktimeData = pd.read_csv(filePath, index_col=0)
dailyBlocktimeData['tps'] = dailyBlocktimeData['txCount'] / (24 * 60 * 60)
ind2Delete = self.dailyData.columns.intersection(dailyBlocktimeData.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyBlocktimeData, how='outer', left_index=True,right_index=True) # add new columns to daily table
self.updated_blocktime = fileInfo.stat()
print('>>>> Blocktime data loaded from csv-file <<<<')
def loadDAAData(self):
print('>>>> Start update DAA data ... <<<<')
filePath = self.dataPath + 'analyzedDataDAA.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_daa:
dailyDAAData = pd.read_csv(filePath, index_col=0)
ind2Delete = self.dailyData.columns.intersection(dailyDAAData.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyDAAData, how='outer', left_index=True, right_on='Date') # add new columns to daily table
self.dailyData.set_index('Date', inplace=True)
self.dailyData.sort_index(inplace=True)
self.updated_daa = fileInfo.stat()
print('>>>> DAA data loaded from csv-file <<<<')
def loadTwitterData(self):
print('>>>> Start update twitter data ... <<<<')
filePath = self.dataPath + 'analyzedTwitterData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_twitterData:
twitterData = pd.read_csv(filePath, index_col=0)
columns2update = ['overall_Activity', 'defichain_Activity', 'dfi_Activity', 'overall_Likes', 'overall_UniqueUserOverall', 'overall_UniqueUserTweet', 'overall_UniqueUserReply', 'overall_UniqueUserRetweet']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(twitterData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_twitterData = fileInfo.stat()
print('>>>> Twitter data loaded from csv-file <<<<')
def loadTwitterFollowerData(self):
print('>>>> Start update twitter follower data ... <<<<')
filePath = self.dataPath + 'TwitterData_follower.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_twitterFollower:
twitterFollowData = pd.read_csv(filePath, index_col=0)
twitterFollowData.set_index('Date',inplace=True)
columns2update = ['Follower', 'followedToday', 'unfollowedToday']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(twitterFollowData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_twitterFollower = fileInfo.stat()
print('>>>> Twitter data loaded from csv-file <<<<')
def loadIncomeVisitsData(self):
print('>>>> Start update income visits data ... <<<<')
filePath = self.dataPath + 'dataVisitsIncome.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_incomeVisits:
incomeVisitsData = pd.read_csv(filePath, index_col=0)
incomeVisitsData.rename(columns={'0': 'incomeVisits'}, inplace=True)
incomeVisitsData.set_index(incomeVisitsData.index.str[:10], inplace=True) # just use date information without hh:mm
columns2update = ['incomeVisits']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(incomeVisitsData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_incomeVisits = fileInfo.stat()
print('>>>> Income visits data loaded from csv-file <<<<')
def loadPortfolioDownloads(self):
print('>>>> Start update portfolio downloads data ... <<<<')
filePath = self.dataPath + 'dataPortfolioDownloads.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_portfolioDownloads:
portfolioRawData = pd.read_csv(filePath)
columns2update = ['PortfolioWindows', 'PortfolioMac', 'PortfolioLinux']
dfPortfolioData = pd.DataFrame(index=portfolioRawData['DateCaptured'].unique(), columns=columns2update)
dfPortfolioData['PortfolioWindows'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Windows.sum()
dfPortfolioData['PortfolioMac'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Mac.sum()
dfPortfolioData['PortfolioLinux'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Linux.sum()
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dfPortfolioData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_portfolioDownloads = fileInfo.stat()
print('>>>> Portfolio downloads data loaded from csv-file <<<<')
def loadPromoDatabase(self):
print('>>>> Start update DefiChain promo database ... <<<<')
filePath = self.dataPath + 'defichainPromoData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_promoDatabase:
promoRawData = pd.read_csv(filePath, index_col=0)
columns2update = ['postActive', 'mediaActive', 'incentivePointsToday', 'incentiveUsers']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(promoRawData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_promoDatabase = fileInfo.stat()
print('>>>> DefiChain promo database loaded from csv-file <<<<')
def loadMNMonitorDatabase(self):
print('>>>> Start update masternode monitor database ... <<<<')
filePath = self.dataPath + 'masternodeMonitorData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_MNmonitor:
monitorRawData = pd.read_csv(filePath, index_col=0)
columns2update = ['nbMasternodes', 'nbAccounts']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(monitorRawData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_MNmonitor = fileInfo.stat()
print('>>>> MN Monitor database loaded from csv-file <<<<')
def loadAnalyticsVisitsData(self):
print('>>>> Start update raw data analytics visits ... <<<<')
filePath = self.dataPath + 'rawDataUserVisit.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_analyticsVisits:
analyticsRawVisitsData = pd.read_csv(filePath, index_col=0)
analyticsRawVisitsData['visitDate'] = pd.to_datetime(analyticsRawVisitsData.visitTimestamp).dt.date
analyticsVisitData = analyticsRawVisitsData.groupby('visitDate').count()
analyticsVisitData.rename(columns={'visitTimestamp': 'analyticsVisits'}, inplace=True)
columns2update = ['analyticsVisits']
analyticsVisitData.index = analyticsVisitData.index.map(str) # change index from dt to str format
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(analyticsVisitData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_analyticsVisits = fileInfo.stat()
print('>>>> Analytics visits data loaded from csv-file <<<<')
def loadDFIsignalDatabase(self):
print('>>>> Start update DFI-signal database ... <<<<')
filePath = self.dataPath + 'dfiSignalData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_DFIsignal:
dfiSignalRawData = pd.read_csv(filePath, index_col=0)
columns2update = ['user_count','masternode_count','messages_sent','commands_received','minted_blocks']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dfiSignalRawData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_DFIsignal = fileInfo.stat()
print('>>>> DFI-Signal database loaded from csv-file <<<<')
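    # Hedged sketch, not part of the original class: every loader above repeats the same
    # steps (drop columns that already exist, merge the fresh columns, remember the file's
    # stat()). A helper like this could fold that pattern into one place; the method name
    # and signature are assumptions added for illustration only.
    def _mergeIntoDailyData(self, newData, columns2update):
        ind2Delete = self.dailyData.columns.intersection(columns2update)  # check if columns exist
        self.dailyData.drop(columns=ind2Delete, inplace=True)  # delete existing columns to add new ones
        self.dailyData = self.dailyData.merge(newData[columns2update], how='outer',
                                              left_index=True, right_index=True)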
#### HOURLY DATA ####
def loadHourlyData(self):
self.loadHourlyDEXdata()
self.loadDEXVolume()
self.loadTokenCrypto()
self.loadHourlyDEXTrades()
self.loadDFXdata()
def loadHourlyDEXdata(self):
print('>>>> Start update hourly DEX data ... <<<<')
filePath = self.dataPath + 'LMPoolData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_dexHourly:
hourlyDEXData = pd.read_csv(filePath, index_col=0)
hourlyDEXData['timeRounded'] = pd.to_datetime(hourlyDEXData.Time).dt.floor('H')
hourlyDEXData.set_index(['timeRounded'], inplace=True)
hourlyDEXData['reserveA_DFI'] = hourlyDEXData['reserveA'] / hourlyDEXData['DFIPrices']
for poolSymbol in hourlyDEXData.symbol.dropna().unique():
df2Add = hourlyDEXData[hourlyDEXData.symbol == poolSymbol]
df2Add = df2Add.drop(columns=['Time', 'symbol'])
# calculate locked DFI and corresponding values
df2Add = df2Add.assign(lockedDFI=df2Add['reserveB'] + df2Add['reserveA_DFI'])
df2Add = df2Add.assign(lockedUSD=df2Add['lockedDFI']*hourlyDEXData[hourlyDEXData.symbol == 'USDT-DFI'].DFIPrices)
df2Add = df2Add.assign(lockedBTC=df2Add['lockedDFI'] * hourlyDEXData[hourlyDEXData.symbol == 'BTC-DFI'].DFIPrices)
# calculate relative price deviations
df2Add = df2Add.assign(relPriceDevCoingecko=((df2Add['DFIPrices'] - df2Add['reserveA/reserveB'])/df2Add['DFIPrices']))
df2Add = df2Add.assign(relPriceDevBittrex=((df2Add['DFIPricesBittrex'] - df2Add['reserveA/reserveB']) / df2Add['DFIPricesBittrex']))
# add prefix to column names for pool identification
colNamesOrig = df2Add.columns.astype(str)
colNamesNew = poolSymbol+'_' + colNamesOrig
df2Add = df2Add.rename(columns=dict(zip(colNamesOrig, colNamesNew)))
# delete existing information and add new one
ind2Delete = self.hourlyData.columns.intersection(colNamesNew) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(df2Add, how='outer', left_index=True, right_index=True) # add new columns to daily table
self.hourlyData['Date'] = pd.to_datetime(self.hourlyData.index).strftime('%Y-%m-%d')
self.updated_dexHourly = fileInfo.stat()
print('>>>> Hourly DEX data loaded from csv-file <<<<')
def loadDEXVolume(self):
print('>>>> Start update DEX volume data ... <<<<')
filePath = self.dataPath + 'DEXVolumeData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_dexVolume:
volumeData = pd.read_csv(filePath, index_col=0)
volumeData['timeRounded'] = pd.to_datetime(volumeData.Time).dt.floor('H')
volumeData.set_index(['timeRounded'], inplace=True)
for poolSymbol in volumeData['base_name'].unique():
df2Add = volumeData[volumeData['base_name']==poolSymbol][['base_volume', 'quote_volume']]
df2Add['VolTotal'] = df2Add[['base_volume', 'quote_volume']].sum(axis=1)
# add prefix to column names for pool identification
colNamesOrig = df2Add.columns.astype(str)
colNamesNew = poolSymbol + '_' + colNamesOrig
df2Add = df2Add.rename(columns=dict(zip(colNamesOrig, colNamesNew)))
# delete existing information and add new one
ind2Delete = self.hourlyData.columns.intersection(colNamesNew) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(df2Add, how='outer', left_index=True, right_index=True) # add new columns to daily table
# calculate total volume after merge of data
            self.hourlyData['VolTotal'] = self.hourlyData['BTC_VolTotal']*0  # only use rows with data; BTC was the first pool and has the most data (besides ETH, USDT)
for poolSymbol in volumeData['base_name'].unique():
self.hourlyData['VolTotal'] = self.hourlyData['VolTotal'] + self.hourlyData[poolSymbol+'_'+'VolTotal'].fillna(0)
self.hourlyData['VolTotalCoingecko'] = volumeData[volumeData['base_name']=='BTC']['coingeckoVolume']
self.updated_dexVolume = fileInfo.stat()
print('>>>> DEX volume data loaded from csv-file <<<<')
def loadHourlyDEXTrades(self):
print('>>>> Start update hourly DEX trade data ... <<<<')
filePath = self.dataPath + 'hourlyDEXTrades.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_hourlyDEXTrades:
hourlyTrades = pd.read_csv(filePath, index_col=0)
hourlyTrades.fillna(0, inplace=True)
hourlyTrades.index = pd.to_datetime(hourlyTrades.index).tz_localize(None)
columns2update = []
currName = ['BTC', 'ETH', 'USDT', 'DOGE', 'LTC', 'BCH', 'USDC', 'DFI']
for ind in range(7):
hourlyTrades['volume'+currName[ind]+'buyDFI'] = hourlyTrades[currName[ind]+'pool_base'+currName[ind]] * hourlyTrades[currName[ind]+'-USD']
hourlyTrades['volume'+currName[ind]+'sellDFI'] = hourlyTrades[currName[ind]+'pool_quote'+currName[ind]] * hourlyTrades[currName[ind]+'-USD']
columns2update.extend(['volume'+currName[ind]+'buyDFI', 'volume'+currName[ind]+'sellDFI'])
hourlyTrades['volumeOverallbuyDFI'] = hourlyTrades['volumeBTCbuyDFI']+hourlyTrades['volumeETHbuyDFI']+hourlyTrades['volumeUSDTbuyDFI'] + \
hourlyTrades['volumeDOGEbuyDFI']+hourlyTrades['volumeLTCbuyDFI']+hourlyTrades['volumeBCHbuyDFI'] + \
hourlyTrades['volumeUSDCbuyDFI']
hourlyTrades['volumeOverallsellDFI'] = hourlyTrades['volumeBTCsellDFI']+hourlyTrades['volumeETHsellDFI']+hourlyTrades['volumeUSDTsellDFI'] + \
hourlyTrades['volumeDOGEsellDFI']+hourlyTrades['volumeLTCsellDFI']+hourlyTrades['volumeBCHsellDFI'] + \
hourlyTrades['volumeUSDCsellDFI']
columns2update.extend(['volumeOverallbuyDFI', 'volumeOverallsellDFI'])
ind2Delete = self.hourlyData.columns.intersection(columns2update) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(hourlyTrades[columns2update], how='outer', left_index=True, right_index=True) # delete existing columns to add new ones
self.updated_hourlyDEXTrades = fileInfo.stat()
        print('>>>> Hourly DEX trade data loaded from csv-file <<<<')
def loadTokenCrypto(self):
print('>>>> Start update token data ... <<<<')
filePath = self.dataPath + 'TokenData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_tokenCryptos:
tokenData = | pd.read_csv(filePath, index_col=0) | pandas.read_csv |
import string
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
class TestSeriesAccessor:
# TODO: collect other Series accessor tests
def test_to_dense(self):
s = pd.Series([0, 1, 0, 10], dtype="Sparse[int64]")
result = s.sparse.to_dense()
expected = pd.Series([0, 1, 0, 10])
tm.assert_series_equal(result, expected)
class TestFrameAccessor:
def test_accessor_raises(self):
df = pd.DataFrame({"A": [0, 1]})
with pytest.raises(AttributeError, match="sparse"):
df.sparse
@pytest.mark.parametrize("format", ["csc", "csr", "coo"])
@pytest.mark.parametrize("labels", [None, list(string.ascii_letters[:10])])
@pytest.mark.parametrize("dtype", ["float64", "int64"])
@td.skip_if_no_scipy
def test_from_spmatrix(self, format, labels, dtype):
import scipy.sparse
sp_dtype = pd.SparseDtype(dtype, np.array(0, dtype=dtype).item())
mat = scipy.sparse.eye(10, format=format, dtype=dtype)
result = pd.DataFrame.sparse.from_spmatrix(mat, index=labels, columns=labels)
expected = pd.DataFrame(
np.eye(10, dtype=dtype), index=labels, columns=labels
).astype(sp_dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"columns",
[["a", "b"], pd.MultiIndex.from_product([["A"], ["a", "b"]]), ["a", "a"]],
)
@td.skip_if_no_scipy
def test_from_spmatrix_columns(self, columns):
import scipy.sparse
dtype = pd.SparseDtype("float64", 0.0)
mat = scipy.sparse.random(10, 2, density=0.5)
result = pd.DataFrame.sparse.from_spmatrix(mat, columns=columns)
expected = pd.DataFrame(mat.toarray(), columns=columns).astype(dtype)
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
def test_to_coo(self):
import scipy.sparse
df = pd.DataFrame({"A": [0, 1, 0], "B": [1, 0, 0]}, dtype="Sparse[int64, 0]")
result = df.sparse.to_coo()
expected = scipy.sparse.coo_matrix(np.asarray(df))
assert (result != expected).nnz == 0
def test_to_dense(self):
df = pd.DataFrame(
{
"A": pd.SparseArray([1, 0], dtype=pd.SparseDtype("int64", 0)),
"B": pd.SparseArray([1, 0], dtype=pd.SparseDtype("int64", 1)),
"C": pd.SparseArray([1.0, 0.0], dtype=pd.SparseDtype("float64", 0.0)),
},
index=["b", "a"],
)
result = df.sparse.to_dense()
expected = pd.DataFrame(
{"A": [1, 0], "B": [1, 0], "C": [1.0, 0.0]}, index=["b", "a"]
)
tm.assert_frame_equal(result, expected)
def test_density(self):
df = pd.DataFrame(
{
"A": pd.SparseArray([1, 0, 2, 1], fill_value=0),
"B": pd.SparseArray([0, 1, 1, 1], fill_value=0),
}
)
res = df.sparse.density
expected = 0.75
assert res == expected
@pytest.mark.parametrize("dtype", ["int64", "float64"])
@pytest.mark.parametrize("dense_index", [True, False])
@td.skip_if_no_scipy
def test_series_from_coo(self, dtype, dense_index):
import scipy.sparse
A = scipy.sparse.eye(3, format="coo", dtype=dtype)
result = | pd.Series.sparse.from_coo(A, dense_index=dense_index) | pandas.Series.sparse.from_coo |
import requests
import time
import pandas as pd
class StationsFeed(object):
"""Client that pulls data from Divvy JSON feed:
https://feeds.divvybikes.com/stations/stations.json
Attributes:
        data: latest snapshot of station information, stored as a pandas DataFrame.
        event_history: accumulated history of observed station updates, stored as a pandas DataFrame.
"""
def __init__(self):
self.data = pd.DataFrame()
self.event_history = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
from linearmodels.iv.data import IVData
try:
import xarray as xr
MISSING_XARRAY = False
except ImportError:
MISSING_XARRAY = True
def test_numpy_2d() -> None:
x = np.empty((10, 2))
xdh = IVData(x)
assert xdh.ndim == x.ndim
assert xdh.cols == ["x.0", "x.1"]
assert xdh.rows == list(np.arange(10))
assert_equal(xdh.ndarray, x)
df = pd.DataFrame(x, columns=xdh.cols, index=xdh.rows)
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 2)
assert xdh.labels == {0: xdh.rows, 1: xdh.cols}
def test_numpy_1d() -> None:
x = np.empty(10)
xdh = IVData(x)
assert xdh.ndim == 2
assert xdh.cols == ["x"]
assert xdh.rows == list(np.arange(10))
assert_equal(xdh.ndarray, x[:, None])
df = pd.DataFrame(x[:, None], columns=xdh.cols, index=xdh.rows)
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 1)
def test_pandas_df_numeric() -> None:
x = np.empty((10, 2))
index = pd.date_range("2017-01-01", periods=10)
xdf = pd.DataFrame(x, columns=["a", "b"], index=index)
xdh = IVData(xdf)
assert xdh.ndim == 2
assert xdh.cols == list(xdf.columns)
assert xdh.rows == list(xdf.index)
assert_equal(xdh.ndarray, x)
df = pd.DataFrame(x, columns=xdh.cols, index=xdh.rows).asfreq("D")
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 2)
def test_pandas_series_numeric() -> None:
x = np.empty(10)
index = pd.date_range("2017-01-01", periods=10)
xs = pd.Series(x, name="charlie", index=index)
xdh = IVData(xs)
assert xdh.ndim == 2
assert xdh.cols == [xs.name]
assert xdh.rows == list(xs.index)
assert_equal(xdh.ndarray, x[:, None])
df = pd.DataFrame(x[:, None], columns=xdh.cols, index=xdh.rows).asfreq("D")
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 1)
@pytest.mark.skipif(MISSING_XARRAY, reason="xarray not installed")
def test_xarray_1d() -> None:
x_np = np.random.randn(10)
x = xr.DataArray(x_np)
dh = IVData(x, "some_variable")
assert_equal(dh.ndarray, x_np[:, None])
assert dh.rows == list(np.arange(10))
assert dh.cols == ["some_variable.0"]
expected = pd.DataFrame(x_np, columns=dh.cols, index=dh.rows)
assert_frame_equal(expected, dh.pandas)
index = pd.date_range("2017-01-01", periods=10)
x = xr.DataArray(x_np, [("time", index)])
dh = IVData(x, "some_variable")
assert_equal(dh.ndarray, x_np[:, None])
assert_series_equal(pd.Series(dh.rows), pd.Series(list(index)))
assert dh.cols == ["some_variable.0"]
expected = pd.DataFrame(x_np[:, None], columns=dh.cols, index=dh.rows)
assert_frame_equal(expected, dh.pandas)
@pytest.mark.skipif(MISSING_XARRAY, reason="xarray not installed")
def test_xarray_2d() -> None:
x_np = np.random.randn(10, 2)
x = xr.DataArray(x_np)
dh = IVData(x)
assert_equal(dh.ndarray, x_np)
assert dh.rows == list(np.arange(10))
assert dh.cols == ["x.0", "x.1"]
expected = | pd.DataFrame(x_np, columns=dh.cols, index=dh.rows) | pandas.DataFrame |
from abc import ABC, abstractmethod,ABCMeta
import sys
import os
import math
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from data_fetcher import downloader
from datetime import datetime
from collections import OrderedDict,Set
import numpy as np
import matplotlib.pyplot as plt
def companies():
dataset = pd.read_csv(os.path.join("data","dow30.csv"))
return dataset
def symbol_list():
dataset = pd.read_csv(os.path.join("data","dow30.csv"))
return dataset['Symbol'].values.tolist()
class BaseData(object):
def __init__(self,symbol:str):
self.__symbol = symbol
@property
def symbol(self):
return self.__symbol
def save(self,file_dir:str,file_name:str,data:pd.DataFrame):
try:
if data is None:
return
full_path = os.path.join(file_dir,file_name)
include_index = False if data.index.name == None else True
if os.path.isdir(file_dir):
data.to_csv(full_path,index=include_index)
else:
os.makedirs(file_dir)
data.to_csv(full_path,index=include_index)
except OSError as err:
print("OS error for symbol {} : {}".format(self.symbol,err))
except:
print("Unexpected error for symbol {} : {}".format(self.symbol, sys.exc_info()[0]))
class Downloader(BaseData):
def __init__(self,symbol:str,start_date:str, end_date:str):
try:
BaseData.__init__(self,symbol)
self.__start_date = datetime.strptime(start_date,'%Y%m%d')
self.__end_date = datetime.strptime(end_date,'%Y%m%d')
self.__data = None
#Download data from Yahoo.
yah = downloader.load_yahoo_quote(symbol,start_date,end_date)
header = yah[0].split(',')
table = []
for i in yah[1:]:
quote = i.split(',')
if len(quote)>1:
d = dict()
d[header[0]] = quote[0]
d[header[1]] = quote[1]
d[header[2]] = quote[2]
d[header[3]] = quote[3]
d[header[4]] = quote[4]
d[header[5]] = quote[5]
d[header[6]] = quote[6]
table.append(d)
self.__data = pd.DataFrame(table)
self.__size = len(self.__data)
except OSError as err:
print("OS error for symbol {} : {}".format(symbol,err))
def save(self):
file_dir = os.path.join("./data",self.symbol)
BaseData.save(self,file_dir,"quotes.csv",self.__data)
@property
def start_date(self):
return self.__start_date
@property
def end_date(self):
return self.__end_date
@property
def data(self):
return self.__data
@property
def size(self):
return self.__size
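# Hedged usage sketch, not part of the original module: chains Downloader with the
# Feature_Selection class defined below for one ticker. The symbol and date range are
# illustrative assumptions, and nothing calls this function automatically.
def _demo_pipeline(symbol="MSFT", start_date="20180101", end_date="20181231"):
    quotes = Downloader(symbol, start_date, end_date)
    quotes.save()
    selector = Feature_Selection(symbol, quotes.data)
    selector.calculate_features()
    selector.normalize_data()
    selector.save_normalized_data()
    return selector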
class Feature_Selection(BaseData):
def __init__(self,symbol:str,data:pd.DataFrame,mfi_days=14):
BaseData.__init__(self,symbol)
self.__days = mfi_days
self.__data = None
self.__data_normal = None
cols = data.columns.values
cols_check = "Date,Open,High,Low,Close,Adj Close,Volume".split(',')
missing = False
for col in cols:
found = False
for name in cols_check:
if col == name:
found = True
break
if not found:
print("The column {} is missing.".format(col))
missing = True
break
if not missing:
self.__data = data
self.__data['Date'] = pd.to_datetime(self.__data['Date'])
self.__data.sort_values('Date',inplace=True)
self.__data.reset_index(drop=True,inplace=True)
self.__data.index.name = 'index'
@classmethod
def read_csv(cls,symbol:str,file_loc:str):
try:
data = pd.read_csv(file_loc)
return cls(symbol,data)
except OSError as err:
print("OS error {}".format(err))
return None
@property
def data(self):
return self.__data
@property
def data_normal(self):
return self.__data_normal
def calculate_features(self):
self.__cal_log_return("Adj Close")
self.__cal_mfi()
def __scale_data(self,col_Name:str):
values = self.__data[col_Name].iloc[self.__days:].values.reshape(-1,1)
scaler = MinMaxScaler(feature_range=(-1,1))
return scaler.fit_transform(values).flatten()
def __flatten_data(self,col_Name:str):
return self.__data[col_Name].iloc[self.__days:].values.flatten()
def normalize_data(self):
index = self.__data.index.values[self.__days:]
table = OrderedDict()
table['close'] = self.__flatten_data('Adj Close')
table['returns'] = self.__flatten_data('Adj Close_log_returns')
table['mfi'] = self.__flatten_data('mfi_index')
table['normal_close'] = self.__scale_data('Adj Close')
table['normal_returns'] = self.__scale_data('Adj Close_log_returns')
table['normal_mfi'] = self.__scale_data('mfi_index')
self.__data_normal = pd.DataFrame(table,index=index)
self.__data_normal.index.name = 'index'
def __cal_log_return(self,col_name:str):
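        """Append a '<col_name>_log_returns' column of natural-log returns, r_t = ln(p_t / p_{t-1}).
        The first entry stays 0 because there is no prior price to compare against.
        """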
values = self.__data[col_name].values
log_returns = np.zeros_like(values)
for idx in range(1,len(values)):
log_returns[idx] = math.log(values[idx]/values[idx-1])
self.__data[col_name+"_log_returns"] = pd.Series(log_returns, index = self.__data.index)
def save_stock_data(self):
file_dir = os.path.join("./data",self.symbol)
BaseData.save(self,file_dir,"quote_processed.csv",self.__data_normal)
def save_normalized_data(self):
file_dir = os.path.join("./data",self.symbol)
BaseData.save(self,file_dir,"normalized.csv",self.__data_normal)
def __cal_mfi(self):
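        """Compute the Money Flow Index over self.__days periods (stored as 'mfi_index').
        The MFI weights the typical price, (High + Low + Adj Close) / 3, by volume and maps
        the ratio of positive to negative money flow onto a 0-100 oscillator.
        """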
typ_price = | pd.DataFrame((self.__data["High"] + self.__data["Low"] + self.__data["Adj Close"])/3, columns =["price"] ) | pandas.DataFrame |
"""
data generation module
contains a **Generator** class for generating either **domain specific**
(i.e. names, jobs, addresses, currencies & profiles) or **random** data.
for domain specific data **Generator** supports *name*, *job*, *address*, *currency*, *profile*.
For random data Generator supports generating data using
1. number of rows
2. number of columns
3. number of rows & columns
4. all the above with a predefined metadata
the **Generator** class also supports constraining the generated data to a specific type such as
*integers*, *floats* or *strings*.
for all generation operations Generator relies on the **Engine** class to generate both
domain-specific and random data. The **Engine** class in turn relies on faker + pandas
to generate data
"""
import string
import random
import logging
from typing import List, Dict, Optional
import numpy as np
import pandas as pd
from faker import Faker
from mok.common import SUPPORTED_DATA_TYPES, SUPPORTED_DATA_DOMAINS
logger = logging.getLogger(__name__)
# init. faker to generate domain specific data
faker = Faker()
class Engine:
"""data generation engine/helper used by **Generate** to generate lists
of various data types. these lists will then be combined to create a dataframe
and then exported to various file formats.
***
**methods**
***
*specific*
*random*
***
"""
@staticmethod
def specific(num_rows, domain):
"""
generate a dataframe of domain specific yet random data using
faker
***
**parameters**
***
*num_rows*: number of rows to generate
*domain*: domain of generated data (can be *name*, *job*, *address*, *currency*, *profile*)
***
"""
generated = []
for _ in range(num_rows):
# use faker to generate fake dicts & save them in a list
if domain == "name":
generated.append(faker.name())
elif domain == "job":
generated.append(faker.job())
elif domain == "address":
generated.append(faker.address())
elif domain == "currency":
currency = faker.currency()
generated.append({"symbol": currency[0], "name": currency[1]})
elif domain == "profile":
generated.append(faker.simple_profile())
# convert list of dicts to a pandas dataframe
# list of strings
if isinstance(generated[0], str):
data = pd.DataFrame( # pylint: disable=invalid-name
generated,
columns=[domain],
)
# list of dicts
elif isinstance(generated[0], dict):
data = pd.DataFrame.from_dict(generated) # pylint: disable=invalid-name
return data
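    # Hedged usage illustration, not part of the original engine: the method name below is
    # an assumption added purely to document how `specific` is meant to be called.
    @staticmethod
    def demo_specific(num_rows=3):
        """Return a tiny DataFrame of fake currencies with 'symbol' and 'name' columns."""
        return Engine.specific(num_rows, domain="currency")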
@staticmethod
def _integer(min_val, max_val, num_rows):
"""
generate a list of random integers
parameters
min_val: lower bound of numbers generated
max_val: upper bound of numbers generated
num_rows: num items to generate
"""
return list(np.random.randint(min_val, max_val, size=(1, num_rows)))[0]
@staticmethod
def _str(min_len, max_len, num_rows):
"""
generate a list of random strings
parameters
min_len: min. length of string
max_val: max. length of string
num_rows: num items to generate
"""
# random lengths array
str_lens = np.random.randint(min_len, max_len, size=(1, num_rows))
# use above lengths to generate random strings
random_str_fn = np.vectorize(
lambda length: "".join(random.choices(string.ascii_lowercase, k=length))
)
return list(random_str_fn(str_lens).tolist())[0]
@staticmethod
def _float(min_val, max_val, num_rows):
"""
generate a list of random floats
parameters
min_val: lower bound of numbers generated
max_val: upper bound of numbers generated
num_rows: num items to generate
"""
return list(np.random.uniform(min_val, max_val, [1, num_rows]).tolist())[0]
@staticmethod
def random( # pylint: disable=dangerous-default-value,too-many-arguments,too-many-locals
num_rows,
num_columns=10,
metadata=None,
min_val=0,
max_val=1000,
min_len=3,
max_len=20,
col_types=[int, str, float],
):
"""
        generate a dataframe of pseudo-random data
***
**parameters**
***
*num_rows*: number of rows to generate
*num_columns*: number of columns to generate
*metadata*: description of data to generate (python dict of the form {col_name: col_val}))
for e.g.
        metadata = {"id": int, "product": str, "price": float}
*min_val*: minimum value for integer/float values generated
*max_val*: maximum value for integer/float values generated
*min_len*: minimum length for string values generated
*max_len*: maximum length for string values generated
*col_types*: data types to generate (by default is `[float, str, int]`)
***
"""
generated = {}
column_names = []
column_types = []
if metadata is not None:
# iterate through metadata and generate columns
for column_name, column_type in metadata.items():
column_names.append(column_name)
column_types.append(column_type)
if column_type == int:
generated[column_name] = Engine._integer(
min_val=min_val, max_val=max_val, num_rows=num_rows
)
elif column_type == str:
generated[column_name] = Engine._str(
min_len=min_len, max_len=max_len, num_rows=num_rows
)
elif column_type == float:
generated[column_name] = Engine._float(
min_val=min_val, max_val=max_val, num_rows=num_rows
)
data = | pd.DataFrame.from_dict(generated, orient="index") | pandas.DataFrame.from_dict |
"""
This script contains helper functions for conveniently reading from SQLite databases
"""
import sqlite3
from sqlite3 import Error
import pandas as pd
import math
def create_connection(db_file):
"""
    Connect to a SQLite database; print the error and return None if the connection fails
    :param db_file: path to the SQLite database file
"""
try:
conn = sqlite3.connect(db_file)
print('SQL Connection Created')
return conn
except Error as e:
print(e)
return None
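# Hedged usage sketch, not part of the original helpers: the database path, table name and
# date strings below are illustrative assumptions only, and nothing calls this automatically.
def _demo_query(db_file='data.db', table='co2'):
    conn = create_connection(db_file)
    if conn is None:
        return None
    return get_data(conn, '2019-07-23 00:00:00', '2019-07-24 23:59:59', table)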
def get_data(conn, startdate, enddate, cpd):
"""
gather data between start and end dates
:param conn: SQLlite3 connection
:param startdate: starting date string , format ex. '2019-07-23 00:00:00'
:param enddate: end date string, format ex. 2019-07-24 23:59:59
:param cpd: the SQLlite database header of the data column
:return:
"""
cur = conn.cursor()
# get data between start and end date
cur.execute(f"SELECT * FROM {cpd} WHERE date BETWEEN '{startdate}' AND '{enddate}'")
rows = cur.fetchall()
# convert to clean dataframe
if len(rows) > 100000:
iterations = math.ceil(len(rows) / 100000)
n = int(len(rows) / (iterations * 2))
chunked_rows = [rows[i * n:(i + 1) * n] for i in range((len(rows) + n - 1) // n)]
data = pd.DataFrame()
for i in range(len(chunked_rows)):
frame = pd.DataFrame(chunked_rows[i])
data = data.append(frame, ignore_index=True)
del frame
print(f'Data Chunk {i} Appended')
data.columns = ['id', 'date', '1', 'status', '2', '3', '4', '5', '6', 'pos', '7', '8', '9', '10', '11', 'ch4',
'12', '13', '14']
badcols = list(pd.Series(range(1, 15)).astype(str))
data.drop(badcols, axis=1, inplace=True)
# create proper datetimes
data['datetime'] = | pd.to_datetime(data['date']) | pandas.to_datetime |
'''
Aggregate dataframes collected from separate sources
'''
import pandas as pd
import numpy as np
import json
import pickle
from utils import doubleheaders, get_momentum_model_inputs, calculate_distance_model_inputs, id_converter, assemble_metrics
from gauss_rank_scaler import GaussRankScaler
print("Reading in statistics...")
try:
today_games = pd.read_csv("./all_data/today_games.csv")
season_totals = pd.read_csv("./all_data/season_totals.csv")
starter_career = pd.read_csv("./all_data/starter_career.csv")
starter_season = pd.read_csv("./all_data/starter_season.csv")
all_starters = pd.read_csv("./all_data/past_raw.csv.gz", compression = "gzip")
except Exception as e:
print("Could not read in statistics: {}".format(e))
raise
s_map = id_converter(today_games, all_starters)
del all_starters
today_games["road_starter_"] = today_games.road_starter.map(s_map)
today_games["home_starter_"] = today_games.home_starter.map(s_map)
today_games = today_games.drop(columns = ["home_starter", "road_starter"]).rename(columns = {
"road_starter_" : "road_starter",
"home_starter_" : "home_starter"
})
working = pd.DataFrame(assemble_metrics(today_games, season_totals, starter_season, starter_career))
working = doubleheaders(working)
try:
working_elo = pd.read_csv("./all_data/daily_elo.csv.gz", compression = "gzip")
except Exception as e:
print("Could not read in elo: {}".format(e))
raise
working_elo["date"] = pd.to_datetime(working_elo.date, format = "%Y-%m-%d")
working_elo = doubleheaders(working_elo, home = "team1")
working_elo = working_elo.drop(columns = ["playoff", "neutral"]).rename(columns = {"team1" : "home_team",
"team2" : "road_team"})
working_elo = working_elo.sort_index(ascending = False).reset_index(drop = True)
working_elo = working_elo[working_elo.date <= working.date.max()]
merge_cols1 = ["date", "home_team", "road_team", "is_doubleheader", "is_tripleheader"]
working = working.merge(working_elo, how = "left", left_on = merge_cols1,
right_on = merge_cols1)
drop_cols = ["elo1_pre", "elo2_pre", "elo_prob1", "elo_prob2", "elo1_post", "elo2_post",
"pitcher1", "pitcher2", "rating_prob2", "rating1_post", "rating2_post", "year_x",
"year_y"]
working = working.drop(drop_cols, axis = 1)
for_momentum = working_elo[working_elo.date < working.date.max()].reset_index(drop = True)
all_momentum = pd.DataFrame(get_momentum_model_inputs(working, for_momentum))
del for_momentum
merge_cols2 = ["date", "is_doubleheader", "is_tripleheader", "home_team", "road_team"]
working = working.merge(all_momentum, how = "left", left_on = merge_cols2, right_on = merge_cols2)
try:
stadiums = | pd.read_csv("./adv_metric_constants/all_stadiums_w_park_ids.csv", index_col = [0]) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # Introduction
#
# Previously I built XG Boost models to predict the main and sub-types of Pokemon from all 7 generations (https://www.kaggle.com/xagor1/pokemon-type-predictions-using-xgb). This was relatively successful, but often stalled at around 70% accuracy per generation, with some much worse. To gain more experience with parameter tuning and feature engineering, I decided to revisit just the 1st Generation, and see if I could improve my results.
# In[2]:
#Load various packages
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
import time
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import xgboost as xgb
from xgboost import plot_importance
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn import metrics
import seaborn as sns
print(os.listdir("../../../input/rounakbanik_pokemon"))
from sklearn.feature_selection import SelectFromModel
from collections import Counter
import warnings
warnings.filterwarnings("ignore")
# # Loading and Modifying Data
#
# To start with, I loaded and modified the data as in the previous kernel.
#
# In contrast to last time, I separated out the numerical and categorical data, and applied one-hot encoding to the latter. This caused the number of features to explode from 24 to 500.
#
# The original plan was to do feature engineering to improve my overall accuracy. However, thus far all my attempts have actually made the predictions worse, so I have left this aside for now.
# In[3]:
#Read data
path = "../../../input/rounakbanik_pokemon/"
egg_df=pd.read_csv(path+"pokemon.csv")
species_df=pd.read_csv(path+"pokemon.csv")
abilities_df=pd.read_csv(path+"pokemon.csv")
#Split duplicates off & combine back
egg2_df=pd.DataFrame.copy(egg_df)
egg2_df=egg_df.loc[egg_df['species_id'].duplicated(), :]
egg_df.drop_duplicates('species_id',inplace=True)
merged = egg_df.merge(egg2_df,on="species_id",how='outer')
merged.fillna(0,inplace=True)
#Rename columns to simpler form.
merged.rename(index=str,columns={"egg_group_id_x":"egg_group_1"},inplace=True)
merged.rename(index=str,columns={"egg_group_id_y":"egg_group_2"},inplace=True)
#Drop last 6 columns
merged.drop(merged.tail(6).index,inplace=True)
#Rename
merged.rename(index=str,columns={"species_id":"pokedex_number"},inplace=True)
#Make a new smaller dataframe
species_trim_df=pd.DataFrame()
species_trim_df["pokedex_number"]=species_df['id']
species_trim_df["color_id"]=species_df['color_id']
species_trim_df["shape_id"]=species_df['shape_id']
species_trim_df["habitat_id"]=species_df['habitat_id']
species_trim_df.drop(species_trim_df.tail(6).index,inplace=True)
#Trim all below Magearna off
abilities_df = abilities_df[abilities_df.pokemon_id < 802]
#Make 3 new columns
abilities_df["Ability1"]=0
abilities_df["Ability2"]=0
abilities_df["Ability3"]=0
#Assign values to the 3 columns based on the ability slot (1-3)
abilities_df["Ability1"] = abilities_df.ability_id.where(abilities_df.slot == 1,0)
abilities_df["Ability2"] = abilities_df.ability_id.where(abilities_df.slot == 2,0)
abilities_df["Ability3"] = abilities_df.ability_id.where(abilities_df.slot == 3,0)
#Split duplicates off into new dataframes
#3 abilities on some means it needs to be split twice
#I'm sure there's an easier way to do this
abilities_df2=pd.DataFrame.copy(abilities_df)
abilities_df2=abilities_df.loc[abilities_df['pokemon_id'].duplicated(), :]
abilities_df.drop_duplicates('pokemon_id',inplace=True)
abilities_df3=pd.DataFrame.copy(abilities_df2)
abilities_df3=abilities_df2.loc[abilities_df2['pokemon_id'].duplicated(), :]
abilities_df2.drop_duplicates('pokemon_id',inplace=True)
#Drop extra columns
abilities_df.drop(['ability_id','is_hidden','slot'],axis=1,inplace=True)
abilities_df2.drop(['ability_id','is_hidden','slot'],axis=1,inplace=True)
abilities_df3.drop(['ability_id','is_hidden','slot'],axis=1,inplace=True)
#Combine everything back
abilities_df=abilities_df.set_index('pokemon_id').add(abilities_df2.set_index('pokemon_id'),fill_value=0).reset_index()
abilities_df=abilities_df.set_index('pokemon_id').add(abilities_df3.set_index('pokemon_id'),fill_value=0).reset_index()
#Rename pokemon_id to pokedex number to allow for merging.
abilities_df.rename(index=str,columns={"pokemon_id":"pokedex_number"},inplace=True)
#Read Kaggle data
path = "../../../input/rounakbanik_pokemon/"
pokemon_df=pd.read_csv(path+"pokemon.csv")
Name_df=pd.DataFrame()
Name_df["name"]=pokemon_df["name"].copy()
#Fix Minior's capture rate
pokemon_df.capture_rate.iloc[773]=30
#Change the type
pokemon_df['capture_rate']=pokemon_df['capture_rate'].astype(str).astype(int)
#Merge all my data.
pokemon_df=pokemon_df.merge(merged,on="pokedex_number",how='outer')
pokemon_df=pokemon_df.merge(species_trim_df,on="pokedex_number",how='outer')
pokemon_df=pokemon_df.merge(abilities_df,on="pokedex_number",how='outer')
#Remove against columns
pokemon_df.drop(list(pokemon_df.filter(regex = 'against')), axis = 1, inplace = True)
#Correct the spelling error
pokemon_df.rename(index=str,columns={"classfication":"classification"},inplace=True)
#Change nan to 'none'
pokemon_df.type2.replace(np.NaN, 'none', inplace=True)
#Drop Pokedex number for now
pokemon_df.drop("pokedex_number",axis=1,inplace=True)
pokemon_df.drop("generation",axis=1,inplace=True)
#First find the NAs.
index_height = pokemon_df['height_m'].index[pokemon_df['height_m'].apply(np.isnan)]
index_weight = pokemon_df['weight_kg'].index[pokemon_df['weight_kg'].apply(np.isnan)]
index_male = pokemon_df['percentage_male'].index[pokemon_df['percentage_male'].apply(np.isnan)]
#Manually replace the missing heights & weights using the Kanto version etc
pokemon_df.height_m.iloc[18]=0.3
pokemon_df.height_m.iloc[19]=0.7
pokemon_df.height_m.iloc[25]=0.8
pokemon_df.height_m.iloc[26]=0.6
pokemon_df.height_m.iloc[27]=1.0
pokemon_df.height_m.iloc[36]=0.6
pokemon_df.height_m.iloc[37]=1.1
pokemon_df.height_m.iloc[49]=0.2
pokemon_df.height_m.iloc[50]=0.7
pokemon_df.height_m.iloc[51]=0.4
pokemon_df.height_m.iloc[52]=1.0
pokemon_df.height_m.iloc[73]=0.4
pokemon_df.height_m.iloc[74]=1.0
pokemon_df.height_m.iloc[75]=1.4
pokemon_df.height_m.iloc[87]=0.9
pokemon_df.height_m.iloc[88]=1.2
pokemon_df.height_m.iloc[102]=2.0
pokemon_df.height_m.iloc[104]=1.0
pokemon_df.height_m.iloc[719]=0.5
pokemon_df.height_m.iloc[744]=0.8
pokemon_df.weight_kg.iloc[18]=3.5
pokemon_df.weight_kg.iloc[19]=18.5
pokemon_df.weight_kg.iloc[25]=30.0
pokemon_df.weight_kg.iloc[26]=12.0
pokemon_df.weight_kg.iloc[27]=29.5
pokemon_df.weight_kg.iloc[36]=9.9
pokemon_df.weight_kg.iloc[37]=19.9
pokemon_df.weight_kg.iloc[49]=0.8
pokemon_df.weight_kg.iloc[50]=33.3
pokemon_df.weight_kg.iloc[51]=4.2
pokemon_df.weight_kg.iloc[52]=32.0
pokemon_df.weight_kg.iloc[73]=20.0
pokemon_df.weight_kg.iloc[74]=105.0
pokemon_df.weight_kg.iloc[75]=300.0
pokemon_df.weight_kg.iloc[87]=30.0
pokemon_df.weight_kg.iloc[88]=30.0
pokemon_df.weight_kg.iloc[102]=120.0
pokemon_df.weight_kg.iloc[104]=45.0
pokemon_df.weight_kg.iloc[719]=9.0
pokemon_df.weight_kg.iloc[744]=25.0
#Create a Genderless column to separate them from the all-female cases.
pokemon_df["Genderless"]=0
pokemon_df["Genderless"].loc[list(index_male)]=1
#Replace all the NANs with zeros in the % male
pokemon_df.percentage_male.replace(np.NaN, 0, inplace=True)
#Check the typings of the pokemon with Alolan forms & fix
#I'm sure this can be done much more elegantly
pokemon_df.type2.iloc[18]='none'
pokemon_df.type2.iloc[19]='none'
pokemon_df.type2.iloc[25]='none'
pokemon_df.type2.iloc[26]='none'
pokemon_df.type2.iloc[27]='none'
pokemon_df.type2.iloc[36]='none'
pokemon_df.type2.iloc[37]='none'
pokemon_df.type2.iloc[49]='none'
pokemon_df.type2.iloc[50]='none'
pokemon_df.type2.iloc[51]='none'
pokemon_df.type2.iloc[52]='none'
pokemon_df.type2.iloc[87]='none'
pokemon_df.type2.iloc[88]='none'
pokemon_df.type2.iloc[104]='none'
#Lets start with just the numerical data for now.
num_features=pokemon_df.select_dtypes(include=np.number)
num_features=num_features.columns
#print("The Type models will be built using the following features")
#print(list(num_features))
# In[4]:
numerical_df=pd.DataFrame.copy(pokemon_df[['attack', 'base_egg_steps', 'base_happiness', 'base_total','capture_rate', 'defense', 'experience_growth','height_m', 'hp', 'percentage_male', 'sp_attack', 'sp_defense', 'speed','weight_kg']])
numerical_df.to_csv('numerical_features.csv',index=False)
one_hot_df=pd.DataFrame.copy(pokemon_df[["Ability1","Ability2","Ability3","egg_group_1","egg_group_2","is_legendary","color_id","shape_id","habitat_id","Genderless"]])
one_hot_df=pd.get_dummies(one_hot_df,prefix=["Ability1","Ability2","Ability3","egg_group_1","egg_group_2","is_legendary","color_id","shape_id","habitat_id","Genderless"],columns=["Ability1","Ability2","Ability3","egg_group_1","egg_group_2","is_legendary","color_id","shape_id","habitat_id","Genderless"])
one_hot_df.to_csv('one_hot_features.csv',index=False)
features=pd.concat([numerical_df,one_hot_df],axis=1)
# In[ ]:
#Do some feature engineering
#features["Total_Offense"]=features["attack"]+features["sp_attack"]
#features["Total_Defense"]=features["defense"]+features["sp_defense"]
#features["Total_Physical"]=features["attack"]+features["defense"]
#features["Total_Special"]=features["sp_attack"]+features["sp_defense"]
#features["Attack_Difference"]=abs(features["attack"]-features["sp_attack"])
#features["Defense_Difference"]=abs(features["defense"]-features["sp_defense"])
#features["Physical_Difference"]=abs(features["attack"]-features["defense"])
#features["Special_Difference"]=abs(features["sp_attack"]-features["sp_defense"])
#features["HeightXWeight"]=features["height_m"]*features["weight_kg"]
#features["BMI"]=features["weight_kg"]/(features["weight_kg"]**2)
#features["Speed_X_Weight"]=features["speed"]*features["weight_kg"]
#features=features.drop(columns=["attack","sp_attack"])
# In[5]:
targets=pd.DataFrame()
targets2=pd.DataFrame()
targets["type1"]=pokemon_df["type1"]
targets=np.ravel(targets)
targets2["type2"]=pokemon_df["type2"]
targets2=np.ravel(targets2)
#Split features & targets into each generation.
Gen1_features=features[0:151]
Gen2_features=features[151:251]
Gen3_features=features[251:386]
Gen4_features=features[386:493]
Gen5_features=features[493:649]
Gen6_features=features[649:721]
Gen7_features=features[721:801]
Gen1_targets=targets[0:151]
Gen2_targets=targets[151:251]
Gen3_targets=targets[251:386]
Gen4_targets=targets[386:493]
Gen5_targets=targets[493:649]
Gen6_targets=targets[649:721]
Gen7_targets=targets[721:801]
Gen1_targets=np.ravel(Gen1_targets)
Gen2_targets=np.ravel(Gen2_targets)
Gen3_targets=np.ravel(Gen3_targets)
Gen4_targets=np.ravel(Gen4_targets)
Gen5_targets=np.ravel(Gen5_targets)
Gen6_targets=np.ravel(Gen6_targets)
Gen7_targets=np.ravel(Gen7_targets)
#Recombine 6 of them, in 7 different ways, to make my different training sets
#Ordering of the features & targets should be the same!
#But doesn't have to be necessarily in numerical order
Gens_not1_features=pd.concat([Gen2_features,Gen3_features,Gen4_features,Gen5_features,Gen6_features,Gen7_features],axis=0)
Gens_not2_features= | pd.concat([Gen1_features,Gen3_features,Gen4_features,Gen5_features,Gen6_features,Gen7_features],axis=0) | pandas.concat |
import os
import logging
import pandas as pd
from collections import defaultdict
def get_sample_files(path,outfile='samples.tsv'):
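    """Walk `path` and pair up R1/R2 fastq files by sample id.
    Sample ids are taken from the file names with the read suffix stripped and
    underscores/spaces replaced by dashes; `outfile` names the samples table to be written.
    """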
samples = defaultdict(dict)
seen = set()
for dir_name, sub_dirs, files in os.walk(os.path.abspath(path)):
for fname in files:
if ".fastq" in fname or ".fq" in fname:
sample_id = fname.split(".fastq")[0].split(".fq")[0]
sample_id = sample_id.replace("_R1", "").replace("_r1", "").replace("_R2", "").replace("_r2", "")
sample_id = sample_id.replace("_", "-").replace(" ", "-")
fq_path = os.path.join(dir_name, fname)
                if fq_path in seen: continue
                seen.add(fq_path)
if "_R2" in fname or "_r2" in fname:
if 'R2' in samples[sample_id]:
logging.error(f"Duplicate sample {sample_id} was found after renaming; skipping... \n Samples: \n{samples}")
samples[sample_id]['R2'] = fq_path
else:
if 'R1' in samples[sample_id]:
logging.error(f"Duplicate sample {sample_id} was found after renaming; skipping... \n Samples: \n{samples}")
samples[sample_id]['R1'] = fq_path
samples= | pd.DataFrame(samples) | pandas.DataFrame |
import pandas as pd
import numpy as np
from scipy import stats
import requests
from io import StringIO
import datetime
from myutils.utils import getConnection, load_bmrs_data, cronlog, email_script
import traceback
cronlog()
numdays = 14
errstr = ''
try:
t = datetime.datetime.today() - pd.offsets.Day(0)
# Get day ahead prices
r = requests.get('https://www.nordpoolgroup.com/globalassets/marketdata-excel-files/n2ex-day-ahead-auction-prices_2020_hourly_gbp.xls')
a = pd.read_html(StringIO(r.text))[0]
a.columns=['date','time1','time2','price']
a = a[a.price.notna()].copy()
a.price = a.price/100
a['date'] = a['date'].str[-4:] + a['date'].str[3:5] + a['date'].str[:2]
a['time1'] = a['time1'].str[:2]
a.drop(columns=['time2'], inplace=True)
dates = a.date.unique()
dates2 = {k: v for k, v in zip(dates[1:], dates[:-1])}
a['date2'] = np.where(a.time1=='23', a.date.map(dates2), a.date)
lastdate = datetime.datetime.strptime(a['date'].iloc[-1], '%Y%m%d')
dates = [(lastdate-pd.offsets.Day(d)).strftime('%Y%m%d') for d in range(numdays)]
a = a[a.date2.isin(dates)].copy()
a['date'] = a['date2'].astype(int)
a['time'] = a['time1'].astype(int)
prices = a[['date','time','price']].groupby(['date','time']).mean()
# Get Historic demand (net of solar)
datalist = []
for d in range(numdays):
        date = (lastdate-pd.offsets.Day(d)).strftime('%Y%m%d')
"""Unittests for the functions in raw, using example datasets."""
import unittest
import pandas.testing as pt
import pandas as pd
from io import StringIO
from gnssmapper import log
import gnssmapper.common.time as tm
import gnssmapper.common.constants as cn
class TestReadCSV(unittest.TestCase):
def setUp(self):
self.filedir = "./tests/data/"
self.filepath = self.filedir+"log_20200211.txt"
def test_read_csv_(self) -> None:
raw_var,fix = log.read_csv_(self.filepath)
raw_expected = pd.DataFrame({
'TimeNanos': [34554000000],
'FullBiasNanos':[-1265446151445559028],
'Svid': [2],
'ConstellationType': [1],
'State': [16431],
'Cn0DbHz': [22.340620040893555]}).convert_dtypes(convert_floating=False,convert_boolean=False,convert_string=False)
fix_expected = pd.DataFrame({
'Latitude': [51.524707],
'Longitude': [-0.134140],
'Altitude': [114.858938],
'(UTC)TimeInMs': [1581410967999]
}).convert_dtypes(convert_floating=False,convert_boolean=False,convert_string=False)
pt.assert_frame_equal(
raw_var.loc[0:0, ['TimeNanos','FullBiasNanos','Svid', 'ConstellationType','State','Cn0DbHz']],
raw_expected)
pt.assert_frame_equal(
fix.loc[0:0, ['Latitude', 'Longitude', 'Altitude', '(UTC)TimeInMs']],
fix_expected
)
def test_platform(self) -> None:
#copy of log.txt with platform replaced by 6
wrong_platform = self.filedir+"wrong_platform.txt"
self.assertWarnsRegex(UserWarning,"Platform 6 found in log file",log.read_csv_,wrong_platform)
def test_version(self) -> None:
#copy of log.txt with version replaced by 1.3.9.9
wrong_version = self.filedir+"wrong_version.txt"
self.assertRaisesRegex(ValueError,"Version 1.3.9.9 found in log file",log.read_csv_,wrong_version)
def test_compare_version(self) -> None:
low = "1.3"
high = "1.4"
expected = "1.4.0.0"
self.assertTrue(log._compare_version(high, expected))
self.assertFalse(log._compare_version(low, expected))
def test_compare_platform(self) -> None:
low = set(["6","M",6])
high = set(["7","N",7,"O",10,"10"])
expected = "7"
self.assertTrue(all([log._compare_platform(x, expected) for x in high]))
self.assertFalse(any([log._compare_platform(x, expected) for x in low]))
class TestProcessRaw(unittest.TestCase):
def setUp(self):
# received 0.1 second after start of week
rx_ = pd.DataFrame(
{'week': [2000], 'day': [0], 'time': [1 * 10 ** 8]})
# transmitted at 0.01 second after start of week
tx_ = pd.DataFrame(
{'week': [2000], 'day': [0], 'time': [1 * 10 ** 7]})
d = {'ConstellationType': [1],
'Svid': [1],
'TimeNanos': tm.gpsweek_to_gps(rx_.week, rx_.day, rx_.time),
'FullBiasNanos': [0],
'ReceivedSvTimeNanos': tm.gpsweek_to_gps(0, tx_.day, tx_.time),
'State': [9]}
self.input = pd.DataFrame(d)
self.tx_gps = tm.gpsweek_to_gps(tx_.week, tx_.day, tx_.time).convert_dtypes()
self.rx = rx_
def test_galileo_ambiguity(self) -> None:
import numpy as np
expected = np.array([6, 7, 8, 9, 10])*cn.nanos_in_period['E']
testdata = np.array([1, 2, 3, 4, 5])+expected
np.testing.assert_array_equal(
log.galileo_ambiguity(testdata),
expected)
def test_period_start_time(self) -> None:
import numpy as np
rx = self.input.TimeNanos[0]
state = 9
constellation = 'G'
expected = tm.gpsweek_to_gps(self.rx.week,
pd.Series([0]),
pd.Series([0]))
pt.assert_series_equal(
            log.period_start_time(pd.Series([rx]), pd.Series([state])
import pandas as pd
import logging
import sys
import os
import numpy as np
from itertools import chain
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
np.random.seed(42)
class Streamgraph(object):
def __init__(self, loglevel="INFO"):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(loglevel)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
handler.setLevel(loglevel)
self.logger.addHandler(handler)
def get_streamgraph_data(self, metadata, query, n=12, method="tfidf"):
df = pd.DataFrame.from_records(metadata)
df.year = pd.to_datetime(df.year)
df.year = df.year.map(lambda x: x.year)
df.year = df.year.map(lambda x: pd.to_datetime(x, format="%Y"))
df = df[df.subject.map(lambda x: x is not None)]
df.subject = df.subject.map(lambda x: [s for s in x.split("; ") if s])
df = df[df.subject.map(lambda x: x != [])]
df["boundary_label"] = df.year
df = df.explode('subject')
df = df[df.subject != ""]
counts = self.get_counts(df)
boundaries = self.get_boundaries(df)
daterange = self.get_daterange(boundaries)
data = pd.merge(counts, boundaries, on='year')
top_n = self.get_top_n(metadata, query, n, method)
data = (data[data.subject.map(lambda x: x in top_n)]
.sort_values("year")
.reset_index(drop=True))
x = self.get_x_axis(daterange)
sg_data = {}
sg_data["x"] = x
sg_data["subject"] = self.postprocess(daterange, data)
return sg_data
@staticmethod
def get_x_axis(daterange):
return [str(x.year) for x in daterange]
@staticmethod
def get_daterange(boundaries):
daterange = pd.date_range(start=min(boundaries.year).to_datetime64(),
end=max(boundaries.year).to_datetime64(),
freq='AS')
if len(daterange) > 0:
return sorted(daterange)
else:
return sorted(pd.unique(boundaries.year))
@staticmethod
def get_stream_range(df):
stream_range = {
"min": min(df.year),
"max": max(df.year),
"range": max(df.year) - min(df.year)
}
return stream_range
@staticmethod
def get_counts(df):
counts = (df.groupby(["year", "subject"])
.agg({'subject': 'count', 'id': lambda x: ", ".join(x)}))
counts.rename({"subject": "counts"}, axis=1, inplace=True)
counts.reset_index(inplace=True)
return counts
@staticmethod
def get_boundaries(df):
boundaries = df[["boundary_label", "year"]].drop_duplicates()
return boundaries
def get_top_n(self, metadata, query, n, method):
df = pd.DataFrame.from_records(metadata)
df = df[df.subject.map(lambda x: len(x) > 2)]
corpus = df.subject.tolist()
# set stopwords , stop_words='english'
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
tokenizer=lambda x: x.split("; "),
lowercase=False,
stop_words=[query]
)
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
tokenizer=lambda x: x.split("; "),
lowercase=False,
stop_words=[query]
)
if method == "count":
tf = tf_vectorizer.fit_transform(corpus)
counts = pd.DataFrame(tf.toarray(),
columns=tf_vectorizer.get_feature_names())
candidates = counts.sum().sort_values(ascending=False).index.tolist()
candidates = [c for c in candidates if len(c) > 0]
top_n = candidates[:n]
if method == "tfidf":
tfidf = tfidf_vectorizer.fit_transform(corpus)
weights = pd.DataFrame(tfidf.toarray(),
columns=tfidf_vectorizer.get_feature_names())
candidates = weights.sum().sort_values(ascending=False).index.tolist()
candidates = [c for c in candidates if len(c) > 0]
top_n = candidates[:n]
if method == "nmf":
tfidf = tfidf_vectorizer.fit_transform(corpus)
nmf = NMF(n_components=n,
alpha=.1, l1_ratio=.5, init='nndsvd',
random_state=42).fit(tfidf)
top_n = list(chain.from_iterable(
[self.get_top_words(t, tfidf_vectorizer.get_feature_names(), 1)
for t in nmf.components_]))
if method == "lda":
tf = tf_vectorizer.fit_transform(corpus)
lda = LatentDirichletAllocation(n_components=n, max_iter=20,
learning_method='batch',
learning_offset=50.,
random_state=42).fit(tf)
top_n = list(chain.from_iterable(
[self.get_top_words(t, tf_vectorizer.get_feature_names(), 1)
for t in lda.components_]))
return top_n
@staticmethod
def get_top_words(topic, feature_names, n):
indices = topic.argsort()[::-1]
words = [feature_names[i] for i in indices]
words = [w for w in words if len(w) > 2]
return words[:n]
@staticmethod
def postprocess(daterange, data):
x = pd.DataFrame(daterange, columns=["year"])
temp = []
for item in pd.unique(data.subject):
tmp = (pd.merge(data[data.subject == item], x,
left_on="year", right_on="year",
how="right")
.fillna({"counts": 0, "subject": item, "id": "NA"})
.sort_values("year"))
y = tmp.counts.astype(int).to_list()
ids_overall = (pd.unique(tmp[tmp.id != "NA"]
.id.map(lambda x: x.split(", "))
.explode()).tolist())
ids_timestep = tmp.id.map(lambda x: x.split(", ")).tolist()
temp.append({"name": item, "y": y,
"ids_overall": ids_overall,
"ids_timestep": ids_timestep})
        df = pd.DataFrame.from_records(temp)
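# --- Hedged usage sketch (not in the original module) --------------------------------
# A minimal call to the keyword-ranking step of the class above. The metadata records
# and field names ('id', 'year', '; '-separated 'subject') are invented to match the
# assumptions the class already makes about its input.
def _top_n_example():
    example_metadata = [
        {"id": "p1", "year": "2019", "subject": "machine learning; neural networks"},
        {"id": "p2", "year": "2020", "subject": "machine learning; topic models"},
        {"id": "p3", "year": "2021", "subject": "topic models; visualization"},
    ]
    sg = Streamgraph()
    # The query term itself is excluded via stop_words, so it never appears in the result.
    return sg.get_top_n(example_metadata, query="machine learning", n=2, method="count")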
import os
import json
import datetime
import altair as alt
import numpy as np
import pandas as pd
from enums import PATH, BAR, DATA_NAME_LIST, UNIQUE_VALUE_FILE, LINE, SCATTER
class ScatterBot():
""" Vega JSON Scatter Chart Bot Class.
Develop a script and usevega to generate as many visualzation as possible,
data cleaning the null value, classify the catgorical and numercial type,
Output as JSON format for different type of Bar Charts.
"""
    def __init__(self, file_name: str, chart_type: str = None) -> None:
        """Creates a new instance of the ScatterBot object.
        Sets up the data path, plot output path and log output path.
        Arguments:
        ----
        file_name {str} -- name of the CSV file in the data directory
        chart_type {str} -- chart type key used to build the output folders
        """
# Define chart type and read csv file
self.file_name = file_name
self.chart_type = chart_type
self.df = pd.read_csv(os.path.join(PATH['data'], self.file_name ))
# Categorical and numbercial type
self.cat_col = []
self.num_col = []
# Unique Value
self.uniques = []
def null_count(self, null_percentage: int, dummy_data: bool = False) -> any:
"""Calculating null values.
. We are dropping Columns which have more than 30% of null value
. Replacing null value with mean in case of int and float
.If null value persist for other cases we are dropping those rows
Arguments:
----
null_percentage: pertcentage of dummy want to cut off
dummy_data: Should replace with dummy or not
"""
nulls_count = {col: self.df[col].isnull().sum() for col in self.df.columns}
# print(nulls_count)
is_null_count_out_of_range = \
{col: self.df[col].isnull().sum() / self.df.shape[0] * 100 > null_percentage for col in self.df.columns}
if ( dummy_data ) :
#self._generate_dummy_data()
pass
else :
for k, v in is_null_count_out_of_range.items():
if v:
self.df.drop(k, axis=1, inplace=True)
else:
if isinstance(self.df[k][0], (np.int64, np.float64)):
self.df[k].fillna(self.df[k].mean(), inplace=True)
else:
drop_list = self.df[self.df[k].isnull()].index.tolist()
self.df.drop(drop_list, axis=0, inplace=True)
nulls_count = {col: self.df[col].isnull().sum() for col in self.df.columns}
# print(nulls_count)
def _generate_dummy_data(self) -> any:
"""Generate dummmy data for null column.
"""
pass
def unique_value_list(self) -> any:
"""Finding all the unique values to in each column.
"""
self.uniques = {col: self.df[col].unique().tolist() for col in self.df.columns}
def write_unique_value_list(self) -> any:
"""Writing the above created dictionary to a text file.
"""
file_name = "{fname}_Unique_values.txt".format(fname=self.file_name[:-4])
unique_list_file_path = os.path.join(PATH['data'], file_name )
if os.path.exists(unique_list_file_path):
print( "{fname} already existed".format( fname = file_name ) )
else:
with open(unique_list_file_path, 'w') as json_file:
json.dump(self.uniques, json_file)
print( "Generate {fname} Successfully".format( fname = file_name ) )
def new_folder_for_log_and_plot(self, charts) -> any:
"""Generate new folder for log file and Json plot.
Arguments:
----
charts: Enum Type import from enums.py
"""
log_folder = PATH["log"][self.chart_type]
plot_folder = PATH["plot"][self.chart_type]
if os.path.exists(log_folder):
pass
else:
os.mkdir(log_folder)
if os.path.exists(plot_folder):
pass
else:
os.mkdir(plot_folder)
# Generate Log folder
for chart in (charts):
print(self._generate_new_folder( log_folder, chart.value ))
# Generate plot folder
for chart in (charts):
print(self._generate_new_folder( plot_folder, chart.value ))
def _generate_new_folder( self, folder_path: str, folder_name: str ) -> str:
"""Generate new folder return generate result.
Arguments:
----
folder_path: parent path
folder_name: name of folder
"""
folder_path = os.path.join(folder_path, folder_name)
if os.path.exists(folder_path):
return "{fname} folder already existed".format( fname = folder_path )
else:
os.mkdir(folder_path)
return "Generate {fname} folder Successfully".format( fname = folder_path )
def dtypes_conversion(self) -> any:
"""
        . Convert columns with 10 or fewer unique values to the categorical dtype
        . Classify integer and float columns as numerical
        . Detect datetime-like object columns and derive year / month columns from them
"""
#unique values in a column
for k,v in self.uniques.items():
if len(pd.Index(v)) <=10:
self.df[k]= self.df[k].astype('category')
self.cat_col = self.df.select_dtypes(include=['category']).columns.tolist()
self.num_col = self.df.select_dtypes(include=['int64','float64']).columns.tolist()
self._dtypes_conversion_datetime()
#self._dtypes_conversion_geograph() [!FIX]
print("Numerical Column : '\n' {num_col}".format( num_col = str(self.num_col) ) )
print("Categorical Column : '\n'{cat_col}".format( cat_col = str(self.cat_col) ))
def _dtypes_conversion_datetime(self) -> any:
"""Check object columns is datetime or not.
. Check the object columns is datetime datatype
. Add new Column according to datetime datatype
"""
types = self.df.dtypes[self.df.dtypes == 'object']
for i, j in types.items():
try:
                self.df[i] = pd.to_datetime(self.df[i])
self.df[i + '_year'] = pd.DatetimeIndex(self.df[i]).year
self.df[i + '_month'] = pd.DatetimeIndex(self.df[i]).month
self.cat_col.append(i + '_year')
self.cat_col.append(i + '_month')
self.df[i + '_month'] = self.df[i + '_month'].apply( lambda x: datetime.date(1900, x, 1).strftime('%B') )
except:
pass
def _dtypes_conversion_geograph(self) -> any:
"""Identify Numercial type as longtitude or lantitude.
"""
pass
def _generate_plot_path(self, cat_name: str, num_name: str, chart_name: str ) -> str:
"""Generate Plot path according to catgorical and numercial column.
Arguments:
----
cat_name: catgorical column name
num_name: numercial column name
chart_name: Specific name of chart.
"""
print( self.file_name[:-4] )
plot_file_name = "{} Vs {}_plot_{}.json".format( cat_name, num_name, self.file_name[:-4] )
if chart_name=="histogram":
plot_file_name = "{}_plot_{}.json".format(num_name, self.file_name[:-4] )
plot_path = os.path.join(PATH['plot'][self.chart_type], chart_name, plot_file_name)
if os.path.exists(plot_path):
print( "{fname} file already existed".format( fname = plot_path ) )
return ""
else:
print( "Generate {fname} folder Successfully".format( fname = plot_path ) )
return plot_path
def get_monotonic_cols(self, df):
return df.loc[:, df.apply(lambda x: x.is_monotonic)].columns
def get_unique_cols(self, df, numerical_cols):
ucols=[]
for i in numerical_cols:
if len(set(df[i]))==len(df[i]):
ucols.append(i)
return ucols
def scatter_JSON_generator(self) -> any:
# Generating JSON using altair methods
tupls= []
for i in range(len(self.num_col)):
for j in range(i+1, len(self.num_col)):
tupls.append((self.num_col[i], self.num_col[j]))
# print(tupls)
for i in tupls:
filtered_column_name0= ''.join(x for x in i[0] if x.isalpha() or x.isnumeric())
filtered_column_name1= ''.join(x for x in i[1] if x.isalpha() or x.isnumeric())
file_path = self._generate_plot_path( filtered_column_name0, filtered_column_name1, SCATTER.SCATTER.value)
if file_path == "" :
pass
else:
                temp = pd.DataFrame.from_dict({filtered_column_name0: self.df[i[0]], filtered_column_name1: self.df[i[1]]})
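# --- Hedged sketch (not from the original class) --------------------------------------
# The loop above assembles a two-column DataFrame plus a target JSON path, so the
# missing tail of scatter_JSON_generator presumably renders and saves an Altair chart
# roughly like this. The encoding choices below are assumptions for illustration only.
def save_scatter_json(temp, x_col, y_col, file_path):
    chart = alt.Chart(temp).mark_circle().encode(
        x=alt.X(x_col, type="quantitative"),
        y=alt.Y(y_col, type="quantitative"),
        tooltip=[x_col, y_col],
    )
    chart.save(file_path)  # Altair writes Vega-Lite JSON when the path ends in .json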
__all__ = [
'get_summary_mapping',
'generate_summaryxref_files',
'merge_oed_to_mapping',
'write_exposure_summary',
'write_summary_levels',
'write_mapping_file',
]
import io
import json
import os
import warnings
import pandas as pd
from ..utils.coverages import SUPPORTED_COVERAGE_TYPES
from ..utils.data import (
factorize_dataframe,
factorize_ndarray,
get_dataframe,
get_json,
merge_dataframes,
set_dataframe_column_dtypes,
get_dtypes_and_required_cols,
)
from ..utils.defaults import (
find_exposure_fp,
SOURCE_IDX,
SUMMARY_MAPPING,
SUMMARY_OUTPUT,
get_loc_dtypes,
get_acc_dtypes,
)
from ..utils.exceptions import OasisException
from ..utils.log import oasis_log
from ..utils.path import as_path
from ..utils.peril import PERILS, PERIL_GROUPS
from ..utils.status import OASIS_KEYS_STATUS
from .gul_inputs import get_gul_input_items
@oasis_log
def get_summary_mapping(inputs_df, oed_hierarchy, is_fm_summary=False):
"""
Create a DataFrame with linking information between Ktools `OasisFiles`
And the Exposure data
    :param inputs_df: dataframe from gul_inputs.get_gul_input_items(..) / il_inputs.get_il_input_items(..)
:type inputs_df: pandas.DataFrame
:param is_fm_summary: Indicates whether an FM summary mapping is required
:type is_fm_summary: bool
:return: Subset of columns from gul_inputs_df / il_inputs_df
:rtype: pandas.DataFrame
"""
acc_num = oed_hierarchy['accnum']['ProfileElementName'].lower()
loc_num = oed_hierarchy['locnum']['ProfileElementName'].lower()
policy_num = oed_hierarchy['polnum']['ProfileElementName'].lower()
portfolio_num = oed_hierarchy['portnum']['ProfileElementName'].lower()
# Case GUL+FM (based on il_inputs_df)
if is_fm_summary:
summary_mapping = inputs_df[inputs_df['level_id'] == inputs_df['level_id'].max()].drop_duplicates(subset=['gul_input_id', 'layer_id'], keep='first')
summary_mapping['agg_id'] = summary_mapping['gul_input_id']
summary_mapping['output_id'] = factorize_ndarray(
summary_mapping.loc[:, ['gul_input_id', 'layer_id']].values,
col_idxs=range(2)
)[0]
summary_mapping.drop('item_id', axis=1, inplace=True)
# GUL Only
else:
summary_mapping = inputs_df.copy(deep=True)
usecols = [
acc_num,
loc_num,
'loc_id',
policy_num,
portfolio_num,
SOURCE_IDX['loc'],
SOURCE_IDX['acc'],
'item_id',
'layer_id',
'coverage_id',
'peril_id',
'agg_id',
'output_id',
'coverage_type_id',
'tiv'
]
summary_mapping.drop(
[c for c in summary_mapping.columns if c not in usecols],
axis=1,
inplace=True
)
dtypes = {
**{t: 'str' for t in [portfolio_num, policy_num, acc_num, loc_num, 'peril_id']},
**{t: 'uint8' for t in ['coverage_type_id']},
**{t: 'uint32' for t in [SOURCE_IDX['loc'], SOURCE_IDX['acc'], 'loc_id', 'item_id', 'layer_id', 'coverage_id', 'agg_id', 'output_id']},
**{t: 'float64' for t in ['tiv']}
}
summary_mapping = set_dataframe_column_dtypes(summary_mapping, dtypes)
return summary_mapping
def merge_oed_to_mapping(summary_map_df, exposure_df, oed_column_set, defaults=None):
"""
    Merge a list of OED columns from the exposure data into the summary mapping
    :param summary_map_df: dataframe returned from get_summary_mapping
    :type summary_map_df: pandas.DataFrame
    :param exposure_df: source exposure (location) data
    :type exposure_df: pandas.DataFrame
    :param oed_column_set: list of OED column names to merge in
    :type oed_column_set: list
    :param defaults: dictionary of values to fill NaN columns with, e.g.
    :type defaults: dict
    {'Col_A': 0, 'Col_B': 1, 'Col_C': 2}
:return: New DataFrame of summary_map_df + exposure_df merged on exposure index
:rtype: pandas.DataFrame
"""
column_set = [c.lower() for c in oed_column_set]
columns_found = [c for c in column_set if c in exposure_df.columns.to_list()]
columns_missing = list(set(column_set) - set(columns_found))
# Select DF with matching cols
exposure_col_df = exposure_df.loc[:, columns_found]
# Add default value if optional column is missing
for col in columns_missing:
if col in defaults:
exposure_col_df[col] = defaults[col]
else:
raise OasisException('Column to merge "{}" not in locations dataframe or defined with a default value'.format(col))
exposure_col_df[SOURCE_IDX['loc']] = exposure_df.index
new_summary_map_df = merge_dataframes(summary_map_df, exposure_col_df, join_on=SOURCE_IDX['loc'], how='inner')
if defaults:
new_summary_map_df.fillna(value=defaults, inplace=True)
return new_summary_map_df
def group_by_oed(oed_col_group, summary_map_df, exposure_df, sort_by, accounts_df=None):
"""
    Group the summary map by a list of OED fields from `oed_col_group`
    :param oed_col_group: list of OED column names to group by
    :type oed_col_group: list
    :param summary_map_df: dataframe returned from get_summary_mapping
    :type summary_map_df: pandas.DataFrame
    :param exposure_df: DataFrame loaded from location.csv
    :type exposure_df: pandas.DataFrame
    :param accounts_df: DataFrame loaded from accounts.csv
    :type accounts_df: pandas.DataFrame
    :return: tuple of (summary id codes, factorized group values)
    :rtype: tuple
    summary_ids[0] is an int array of ids 1..n, e.g. array([1, 2, 1, 2, 1, 2, ...])
    summary_ids[1] is the array of values used to factorize, e.g. array(['Layer1', 'Layer2'], dtype=object)
"""
    oed_cols = [c.lower() for c in oed_col_group]  # all required columns
    unmapped_cols = [c for c in oed_cols if c not in summary_map_df.columns]  # columns that must come from the location / accounts files
    mapped_cols = [c for c in oed_cols + [SOURCE_IDX['loc'], SOURCE_IDX['acc'], sort_by] if c in summary_map_df.columns]  # columns already in summary_map_df
# Extract mapped_cols from summary_map_df
summary_group_df = summary_map_df.loc[:, mapped_cols]
    # Search the location / accounts files and merge in the remaining columns
    if unmapped_cols:
# Location file columns
exposure_cols = [c for c in unmapped_cols if c in exposure_df.columns]
exposure_col_df = exposure_df.loc[:, exposure_cols + [SOURCE_IDX['loc']]]
summary_group_df = merge_dataframes(summary_group_df, exposure_col_df, join_on=SOURCE_IDX['loc'], how='left')
# Account file columns
if isinstance(accounts_df, pd.DataFrame):
accounts_cols = [c for c in unmapped_cols if c in set(accounts_df.columns) - set(exposure_df.columns)]
if accounts_cols:
accounts_col_df = accounts_df.loc[:, accounts_cols + [SOURCE_IDX['acc']]]
summary_group_df = merge_dataframes(summary_group_df, accounts_col_df, join_on=SOURCE_IDX['acc'], how='left')
summary_group_df.fillna(0, inplace=True)
summary_group_df.sort_values(by=[sort_by], inplace=True)
summary_ids = factorize_dataframe(summary_group_df, by_col_labels=oed_cols)
return summary_ids[0], summary_ids[1]
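# Hedged illustration (not part of the package): what the factorization above produces on
# a toy frame. The OED values are invented; only the shape of the output matters here.
def _summary_id_example():
    toy = pd.DataFrame({'portnumber': ['P1', 'P1', 'P1', 'P1'],
                        'polnumber': ['Layer1', 'Layer2', 'Layer1', 'Layer2']})
    # group code per row (1-based), analogous to summary_ids[0] in the docstring above
    codes = toy.groupby(['portnumber', 'polnumber'], sort=True).ngroup() + 1
    groups = toy.drop_duplicates(['portnumber', 'polnumber'])
    return codes.tolist(), groups  # ([1, 2, 1, 2], the two distinct groups)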
def write_summary_levels(exposure_df, accounts_fp, target_dir):
'''
    Writes a JSON file listing the Available / Recommended columns for use in summary reporting
    Available: columns which exist in the input files and have at least one non-zero / non-NaN value
    Recommended: columns which are available and also in the list of `useful` groupings SUMMARY_LEVEL_LOC
{
'GUL': {
'available': ['accnumber',
'locnumber',
'istenant',
'buildingid',
'countrycode',
'latitude',
'longitude',
'streetaddress',
'postalcode',
'occupancycode',
'constructioncode',
'locperilscovered',
'buildingtiv',
'contentstiv',
'bitiv',
'portnumber'],
'IL': {
... etc ...
}
}
'''
# Manage internal columns, (Non-OED exposure input)
int_excluded_cols = ['loc_id', SOURCE_IDX['loc']]
desc_non_oed = 'Not an OED field'
int_oasis_cols = {
'coverage_type_id': 'Oasis coverage type',
'peril_id': 'OED peril code',
'coverage_id': 'Oasis coverage identifier',
}
# GUL perspective (loc columns only)
l_col_list = exposure_df.loc[:, exposure_df.any()].columns.to_list()
l_col_info = get_loc_dtypes()
for k in list(l_col_info.keys()):
l_col_info[k.lower()] = l_col_info[k]
del l_col_info[k]
gul_avail = {k: l_col_info[k]['desc'] if k in l_col_info else desc_non_oed
for k in set([c.lower() for c in l_col_list]).difference(int_excluded_cols)}
gul_summary_lvl = {'GUL': {'available': {**gul_avail, **int_oasis_cols}}}
# IL perspective (join of acc + loc col with no dups)
il_summary_lvl = {}
if accounts_fp:
accounts_df = pd.read_csv(accounts_fp)
a_col_list = accounts_df.loc[:, accounts_df.any()].columns.to_list()
a_col_info = get_acc_dtypes()
a_avail = set([c.lower() for c in a_col_list])
il_avail = {k: a_col_info[k]['desc'] if k in a_col_info else desc_non_oed
for k in a_avail.difference(gul_avail.keys())}
il_summary_lvl = {'IL': {'available': {**gul_avail, **il_avail, **int_oasis_cols}}}
with io.open(os.path.join(target_dir, 'exposure_summary_levels.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps({**gul_summary_lvl, **il_summary_lvl}, sort_keys=True, ensure_ascii=False, indent=4))
@oasis_log
def write_mapping_file(sum_inputs_df, target_dir, is_fm_summary=False):
"""
Writes a summary map file, used to build summarycalc xref files.
    :param sum_inputs_df: dataframe returned from get_summary_mapping
    :type sum_inputs_df: pandas.DataFrame
    :param target_dir: directory in which to write the summary map file
    :type target_dir: str
    :param is_fm_summary: Indicates whether an FM summary mapping is required
    :type is_fm_summary: bool
    :return: Summary map file path
    :rtype: str
"""
target_dir = as_path(
target_dir,
'Target IL input files directory',
is_dir=True,
preexists=False
)
    # Set chunk size for writing the CSV files - max 200K rows, min 1K
chunksize = min(2 * 10**5, max(len(sum_inputs_df), 1000))
if is_fm_summary:
sum_mapping_fp = os.path.join(target_dir, SUMMARY_MAPPING['fm_map_fn'])
else:
sum_mapping_fp = os.path.join(target_dir, SUMMARY_MAPPING['gul_map_fn'])
try:
sum_inputs_df.to_csv(
path_or_buf=sum_mapping_fp,
encoding='utf-8',
mode=('w' if os.path.exists(sum_mapping_fp) else 'a'),
chunksize=chunksize,
index=False
)
except (IOError, OSError) as e:
raise OasisException from e
return sum_mapping_fp
def get_column_selection(summary_set):
"""
Given a analysis_settings summary definition, return either
1. the set of OED columns requested to group by
2. If no information key 'oed_fields', then group all outputs into a single summary_set
:param summary_set: summary group dictionary from the `analysis_settings.json`
:type summary_set: dict
:return: List of selected OED columns to create summary groups from
:rtype: list
"""
if "oed_fields" not in summary_set:
return None
if not summary_set["oed_fields"]:
return None
# Use OED column list set in analysis_settings file
elif isinstance(summary_set['oed_fields'], list) and len(summary_set['oed_fields']) > 0:
return [c.lower() for c in summary_set['oed_fields']]
else:
raise OasisException('Unable to process settings file')
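# Hedged usage sketch (not from the package): the shape of an analysis_settings summary
# entry this helper expects. The OED field names below are only illustrative.
def _column_selection_example():
    summary_set = {"id": 1, "eltcalc": True, "oed_fields": ["PortNumber", "PolNumber"]}
    return get_column_selection(summary_set)  # -> ['portnumber', 'polnumber']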
def get_ri_settings(run_dir):
"""
Return the contents of ri_layers.json
Example:
{
"1": {
"inuring_priority": 1,
"risk_level": "LOC",
"directory": " ... /runs/ProgOasis-20190501145127/RI_1"
}
}
:param run_dir: The file path of the model run directory
:type run_dir: str
:return: metadata for the Reinsurance layers
:rtype: dict
"""
return get_json(src_fp=os.path.join(run_dir, 'ri_layers.json'))
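# Hedged illustration (not part of the package): consuming the structure shown in the
# docstring above to locate the layer with the highest inuring priority.
def _max_inuring_layer_example(run_dir):
    ri_layers = get_ri_settings(run_dir)
    max_layer = max(ri_layers, key=lambda k: ri_layers[k]['inuring_priority'])
    return ri_layers[max_layer]['directory']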
@oasis_log
def write_df_to_file(df, target_dir, filename):
"""
Write a generated summary xref dataframe to disk
:param df: The dataframe output of get_df( .. )
:type df: pandas.DataFrame
:param target_dir: Abs directory to write a summary_xref file to
:type target_dir: str
:param filename: Name of file to store as
:type filename: str
"""
target_dir = as_path(target_dir, 'Input files directory', is_dir=True, preexists=False)
chunksize = min(2 * 10**5, max(len(df), 1000))
csv_fp = os.path.join(target_dir, filename)
try:
df.to_csv(
path_or_buf=csv_fp,
encoding='utf-8',
mode=('w'),
chunksize=chunksize,
index=False
)
except (IOError, OSError) as e:
raise OasisException from e
return csv_fp
@oasis_log
def get_summary_xref_df(map_df, exposure_df, accounts_df, summaries_info_dict, summaries_type, gul_items=False):
"""
Create a Dataframe for either gul / il / ri based on a section
from the analysis settings
:param map_df: Summary Map dataframe (GUL / IL)
:type map_df: pandas.DataFrame
:param exposure_df: Location OED data
:type exposure_df: pandas.DataFrame
:param accounts_df: Accounts OED data
:type accounts_df: pandas.DataFrame
    :param summaries_info_dict: list of dictionary definitions for summary groups from the analysis_settings file
:type summaries_info_dict: list
[{
"summarycalc": true,
"eltcalc": true,
"aalcalc": true,
"pltcalc": true,
"id": 1,
"oed_fields": "prog",
"lec_output": true,
"leccalc": {
"return_period_file": true,
"outputs": {
"full_uncertainty_aep": true,
"full_uncertainty_oep": true,
"wheatsheaf_aep": true,
"wheatsheaf_oep": true
}
}
},
...
]
:param summaries_type: Text label to use as key in summary description either ['gul', 'il', 'ri']
:type summaries_type: String
:return summaryxref_df: Dataframe containing abstracted summary data for ktools
    :rtype: pandas.DataFrame
:return summary_desc: dictionary of dataFrames listing what summary_ids map to
:rtype: dictionary
"""
summaryxref_df = pd.DataFrame()
summary_desc = {}
    # Extract the summary id index column depending on the summary grouping type
if 'output_id' in map_df:
id_set_index = 'output_id'
ids_set_df = map_df.loc[:, [id_set_index]].rename(columns={id_set_index: "output"})
elif gul_items:
id_set_index = 'item_id'
ids_set_df = map_df.loc[:, [id_set_index]]
else:
id_set_index = 'coverage_id'
ids_set_df = map_df.loc[:, [id_set_index]]
# For each granularity build a set grouping
for summary_set in summaries_info_dict:
summary_set_df = ids_set_df
cols_group_by = get_column_selection(summary_set)
desc_key = '{}_S{}_summary-info.csv'.format(summaries_type, summary_set['id'])
if isinstance(cols_group_by, list):
(
summary_set_df['summary_id'],
set_values
) = group_by_oed(cols_group_by, map_df, exposure_df, id_set_index, accounts_df)
# Build description file
summary_desc[desc_key] = pd.DataFrame(data=list(set_values), columns=cols_group_by)
summary_desc[desc_key].insert(loc=0, column='summary_id', value=range(1, len(set_values) + 1))
else:
# Fall back to setting all in single group
summary_set_df['summary_id'] = 1
summary_desc[desc_key] = pd.DataFrame(data=['All-Risks'], columns=['_not_set_'])
summary_desc[desc_key].insert(loc=0, column='summary_id', value=1)
# Appends summary set to '__summaryxref.csv'
summary_set_df['summaryset_id'] = summary_set['id']
summaryxref_df = pd.concat([summaryxref_df, summary_set_df], sort=True, ignore_index=True)
dtypes = {
t: 'uint32' for t in ['coverage_id', 'summary_id', 'summaryset_id']
}
summaryxref_df = set_dataframe_column_dtypes(summaryxref_df, dtypes)
return summaryxref_df, summary_desc
@oasis_log
def generate_summaryxref_files(model_run_fp, analysis_settings, il=False, ri=False, gul_item_stream=False):
"""
Top level function for creating the summaryxref files from the manager.py
:param model_run_fp: Model run directory file path
:type model_run_fp: str
:param analysis_settings: Model analysis settings file
:type analysis_settings: dict
:param il: Boolean to indicate the insured loss level mode - false if the
source accounts file path not provided to Oasis files gen.
:type il: bool
:param ri: Boolean to indicate the RI loss level mode - false if the
source accounts file path not provided to Oasis files gen.
    :type ri: bool
    :param gul_item_stream: Boolean for GUL to use item_id instead of coverage_id
    :type gul_item_stream: bool
"""
# Boolean checks for summary generation types (gul / il / ri)
gul_summaries = all([
analysis_settings['gul_output'] if 'gul_output' in analysis_settings else False,
analysis_settings['gul_summaries'] if 'gul_summaries' in analysis_settings else False,
])
il_summaries = all([
analysis_settings['il_output'] if 'il_output' in analysis_settings else False,
analysis_settings['il_summaries'] if 'il_summaries' in analysis_settings else False,
il,
])
ri_summaries = all([
analysis_settings['ri_output'] if 'ri_output' in analysis_settings else False,
analysis_settings['ri_summaries'] if 'ri_summaries' in analysis_settings else False,
ri,
])
# Load locations file for GUL OED fields
input_dir = os.path.join(model_run_fp, 'input')
exposure_fp = find_exposure_fp(input_dir, 'loc')
loc_dtypes, loc_required_cols = get_dtypes_and_required_cols(get_loc_dtypes)
exposure_df = get_dataframe(
src_fp=exposure_fp,
empty_data_error_msg='No source exposure file found.',
col_dtypes=loc_dtypes,
required_cols=loc_required_cols)
exposure_df[SOURCE_IDX['loc']] = exposure_df.index
# Load accounts file for IL OED fields
if (il_summaries or ri_summaries):
accounts_fp = find_exposure_fp(input_dir, 'acc')
acc_dtypes, acc_required_cols = get_dtypes_and_required_cols(get_acc_dtypes)
accounts_df = get_dataframe(
src_fp=accounts_fp,
empty_data_error_msg='No source accounts file found.',
col_dtypes=acc_dtypes,
required_cols=acc_required_cols)
accounts_df[SOURCE_IDX['acc']] = accounts_df.index
if gul_summaries:
# Load GUL summary map
gul_map_fp = os.path.join(model_run_fp, 'input', SUMMARY_MAPPING['gul_map_fn'])
gul_map_df = get_dataframe(
src_fp=gul_map_fp,
empty_data_error_msg='No summary map file found.')
gul_summaryxref_df, gul_summary_desc = get_summary_xref_df(
gul_map_df,
exposure_df,
None,
analysis_settings['gul_summaries'],
'gul',
gul_item_stream
)
# Write Xref file
write_df_to_file(gul_summaryxref_df, os.path.join(model_run_fp, 'input'), SUMMARY_OUTPUT['gul'])
# Write summary_id description files
for desc_key in gul_summary_desc:
write_df_to_file(gul_summary_desc[desc_key], os.path.join(model_run_fp, 'output'), desc_key)
if il_summaries:
# Load FM summary map
il_map_fp = os.path.join(model_run_fp, 'input', SUMMARY_MAPPING['fm_map_fn'])
il_map_df = get_dataframe(
src_fp=il_map_fp,
empty_data_error_msg='No summary map file found.'
)
il_summaryxref_df, il_summary_desc = get_summary_xref_df(
il_map_df,
exposure_df,
accounts_df,
analysis_settings['il_summaries'],
'il'
)
# Write Xref file
write_df_to_file(il_summaryxref_df, os.path.join(model_run_fp, 'input'), SUMMARY_OUTPUT['il'])
# Write summary_id description files
for desc_key in il_summary_desc:
write_df_to_file(il_summary_desc[desc_key], os.path.join(model_run_fp, 'output'), desc_key)
if ri_summaries:
ri_layers = get_ri_settings(model_run_fp)
max_layer = max(ri_layers)
summary_ri_fp = os.path.join(
model_run_fp, os.path.basename(ri_layers[max_layer]['directory']))
        ri_summaryxref_df = pd.DataFrame()
import datetime
from datetime import datetime as dt
from datetime import timedelta as td
import os
import re as reg
from math import sqrt
import matplotlib.pyplot as plt
import matplotlib.pylab as plb
import numpy as np
from sklearn.ensemble import VotingRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from plotly.subplots import make_subplots
import plotly.graph_objs as go
from plotly.offline import plot
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
import pandas as pd
#Constant
EPOCH_TIME = datetime.datetime.utcfromtimestamp(0)
#Functions
def access_csv_url(url):
"""This python module downloads the csv from a url passed
and return the dataframe else raises a FileNotFoundError exception"""
df = pd.read_csv(url)
if df.empty is False:
return df
else:
raise FileNotFoundError
def get_company_name(company_df):
"""lvl1 @2 : Following line loads the initial data of list of companies"""
company_symbol = ''
while (company_df.empty is False and company_symbol == ''):
surety_value = input("Are you sure of company symbol you want to search? (Y/N/E(Exit)) : ")
if surety_value.upper() == 'Y' or surety_value.upper() == 'N':
if surety_value.upper() == 'N':
search_dict = company_search('', company_df)
if len(search_dict) == 0:
print("\n No related results found, Give it another Try!!")
continue
elif len(search_dict) > 0:
if len(search_dict) > 10:
print("Showing Top 10 results for your search which gave ", len(search_dict), " results")
else:
print("found ", str(len(search_dict)), "results")
print(" \t Symbol \t Name")
print("\t _________", "\t", "_________")
for index, key in enumerate(search_dict.keys()):
if index+1 == 11:
break
else:
print("\t", key, "\t\t", search_dict[key])
surety_value = input("Have you found your symbol yet ? Y/N : ")
if surety_value.upper() == 'N' or surety_value.upper() == 'Y':
if surety_value.upper() == 'Y':
company_symbol = input("Enter the final symbol : ")
search_dict = company_search(company_symbol, company_df)
if len(search_dict) > 1:
print("Your search resulted into multiple results, please reselect your company!")
company_symbol = '' #resetting the value so that value can be input again
elif len(search_dict) == 0:
print("Your search yielded no results")
company_symbol = ''
else:
continue
else:
print("please choose only Y or N or y or n or E or e")
continue
elif surety_value.upper() == 'Y':
company_symbol = input("Enter the final symbol : ")
search_dict = company_search(company_symbol, company_df)
if len(search_dict) > 1:
print("Your search resulted into multiple results, please reselect your company!")
company_symbol = '' #resetting the value so that value can be input again
elif len(search_dict) == 0:
print("Your search yielded no results")
company_symbol = ''
elif surety_value.upper() == 'E':
company_symbol = ''
break
else:
print("please choose only Y or N or y or n")
continue
return company_symbol.upper()
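# The company_search helper used above is not shown in this excerpt. The sketch below is
# an assumed reimplementation for illustration only; it presumes the company list has
# 'Symbol' and 'Name' columns, which is an assumption rather than a documented fact.
def company_search_sketch(search_term, company_df):
    """Return {symbol: name} for rows whose symbol or name contains the search term."""
    term = str(search_term).strip().lower()
    mask = (company_df['Symbol'].astype(str).str.lower().str.contains(term, regex=False) |
            company_df['Name'].astype(str).str.lower().str.contains(term, regex=False))
    return dict(zip(company_df.loc[mask, 'Symbol'], company_df.loc[mask, 'Name']))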
def file_exists(filename, filepath):
file_tree = [file for file in os.listdir(filepath) if os.path.isfile(file)]
if filename not in file_tree:
return False
else:
return True
def update_company_data(company_symbol):
"""If a file does not exit then data will be downloaded from the website and save in the file with that company name"""
file_name = (str(company_symbol)+'.csv')
existing_file = file_exists(file_name, '.')
end_date = dt.date(dt.utcnow() - td(seconds=14400))
if existing_file is False:
alpha_url = f"http://quotes.wsj.com/{company_symbol}/historical-prices/download?MOD_VIEW=page%20&num_rows=7500&range_days=7500&startDate=11/01/1970%20&endDate={end_date}" #mm/dd/yyyy
company_data = pd.read_csv(alpha_url)
company_data.columns = [col_name.lstrip() for col_name in company_data.columns]
company_data['Date'] = pd.to_datetime(company_data['Date'], format='%m/%d/%y')
company_data['Date'] = company_data['Date'].dt.date
company_data = company_data.sort_values(by='Date')
if not company_data.empty:
company_data.to_csv(f"{company_symbol}.csv")
else:
"""if the file exists, read the last line and update the data until todays date"""
company_data = pd.read_csv(file_name, index_col=0)
company_data['Date'] = company_data['Date'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date())
row = company_data.sort_values('Date').tail(1)
date = row['Date']
date = str(date).split()
date = datetime.datetime.strptime(date[1], '%Y-%m-%d').date() + td(days=1)
if end_date != date:
remaining_df = pd.read_csv('http://quotes.wsj.com/'+company_symbol+'/historical-prices/download?MOD_VIEW=page%20&num_rows=7500&range_days=7500&startDate='+str(date.month)+'/'+str(date.day)+'/'+str(date.year)+'%20&endDate='+str(end_date.month)+'/'+str(end_date.day)+'/'+str(end_date.year))
remaining_df.columns = company_data.columns
remaining_df['Date'] = pd.to_datetime(remaining_df['Date'], format='%m/%d/%y')
remaining_df['Date'] = remaining_df['Date'].dt.date
company_data = company_data.append(remaining_df, sort=False)
company_data.columns = [col_name.lstrip() for col_name in company_data.columns]
company_data['Date'] = pd.to_datetime(company_data['Date'], format='%Y-%m-%d')
company_data['Date'] = company_data['Date'].dt.date
company_data = company_data.sort_values(by='Date')
company_data.reset_index(inplace=True, drop=True)
company_data.to_csv(str(company_symbol)+'.csv')
return company_data
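# Hedged convenience wrapper (not in the original script): a typical way to consume the
# caching/refresh helper above for a quick sanity check of the latest rows.
def refresh_and_tail(company_symbol, n=5):
    history = update_company_data(company_symbol)
    return history.sort_values('Date').tail(n)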
def print_menu(company_symbol):
"""This prints the main user menu with dynamic company name"""
print("/" + "-" * 56 + "\\")
#print(f"\t\t USER MENU: {company_symbol}")
print(f"\t Stock Analysis MENU of {company_symbol}\t\t\t\t ")
print("|" + "-" * 56 + "|")
print("| 1. Current Data\t\t\t\t\t |")
print("| 2. Summary Statistic \t\t\t\t |")
print("| 3. Raw time-series \t\t\t\t\t |")
print("| 4. Linear trend line \t\t\t\t |")
print("| 5. Moving Averages \t\t\t\t\t |")
print("| 6. Predict close price for a day \t\t\t |")
print("| 7. Enhance Prediction \t\t\t\t |")
print("| 8. Predict close price for N-future days\t\t |")
print("| 9. Compare 2 companies using candlestick chart\t |")
print("| 10. Analyse with new start and end date\t\t |")
print("| 11. Search New Company \t\t\t\t |")
print("| 12. Exit \t\t\t\t\t\t |")
print("\\" + "-" * 56 + "/")
def date_validation(start_date, end_date):
try:
#Check for format of start date, It should be in format of YYYY-MM-DD
datetime.datetime.strptime(start_date, "%Y-%m-%d")
except ValueError:
#If any errors, raise an exception
print("Incorrect Start Date Format")
return '1'
try:
#Check for format of start date, It should be in format of YYYY-MM-DD
datetime.datetime.strptime(end_date, "%Y-%m-%d")
except ValueError:
#If any errors, raise an exception
print("Incorrect End Date Format")
return '2'
try:
#Start Date cannot be later than today
if datetime.datetime.strptime(start_date, "%Y-%m-%d") >= dt.today():
raise ValueError
except ValueError:
#If any errors, raise an exception
print("Start Date cannot be greater than today's date")
return '3'
try:
#End date cannot be greater than today
if datetime.datetime.strptime(end_date, "%Y-%m-%d") >= dt.today():
raise ValueError
except ValueError:
#If any errors, raise an exception
print("End Date cannot be greater than today's date")
return '4'
try:
#Start date can not greater than end date
if datetime.datetime.strptime(start_date, "%Y-%m-%d") >= datetime.datetime.strptime(end_date, "%Y-%m-%d"):
raise ValueError
except ValueError:
print("Start Date should be less than End date")
return '5'
def period_validation(start_date, end_date, period):
try:
#Period should be greater than 0 and less than days between start date and end date
if period < (end_date - start_date).days and period > 0:
return False
else:
raise ValueError
except ValueError:
print('Incorrect value of Window')
return '1'
def current_data(company_symbol):
"""This API gives statistical data of the last working business day"""
last_stats_url = "https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=" + company_symbol + "&apikey=T11CBFXU1UTRD2KG&datatype=csv"
    last_stats = pd.read_csv(last_stats_url)
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
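# Hedged illustration (not part of the test suite): a toy call showing the forward-fill
# behaviour described in the docstring above. Sids, estimates and dates are invented.
def _expected_df_example():
    tuples = [(0, 10.0, pd.Timestamp('2015-01-05')),
              (0, 20.0, pd.Timestamp('2015-01-07')),
              (1, 30.0, pd.Timestamp('2015-01-06'))]
    return create_expected_df_for_factor_compute(
        start_date=pd.Timestamp('2015-01-05'),
        sids=[0, 1, 2],   # sid 2 has no data and comes back as an all-NaN column
        tuples=tuples,
        end_date=pd.Timestamp('2015-01-09'),
    )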
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
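# In the events above, read in TS_FIELD_NAME order: sid 0's Q1 event date is
# pushed back from 2015-01-12 to 2015-01-13, while sid 1's Q1 event date is
# pulled forward from 2015-01-13 to 2015-01-12; the Q2 event (2015-01-20) is
# unchanged for both sids.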
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
import os
import shutil
import sys
from time import sleep, time
from uuid import uuid4
import numpy as np
import pandas as pd
from data_manager2 import file_processor
from returns_quantization import add_returns_in_place
from utils import *
np.set_printoptions(threshold=sys.maxsize)  # print arrays in full; np.nan is rejected as a threshold by newer NumPy
pd.set_option('display.height', 1000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import os
import sqlite3
import pandas as pd
from nempy import markets, historical_spot_market_inputs as hi
from time import time
# Create a list of the historical dispatch intervals to be used.
dispatch_intervals = hi.datetime_dispatch_sequence(start_time='2020/01/02 00:00:00',
end_time='2020/01/03 00:00:00')
# Build a database of historical inputs if it doesn't already exist.
if not os.path.isfile('historical_inputs.db'):
con = sqlite3.connect('historical_inputs.db')
# Create a database manager.
inputs_manager = hi.DBManager(connection=con)
# This is the first time the database has been used so we need to add the tables.
inputs_manager.create_tables()
# Download the relevant historical data from http://nemweb.com.au/#mms-data-model and into the database.
inputs_manager.DUDETAILSUMMARY.set_data(year=2020, month=1) # Unit information
inputs_manager.BIDPEROFFER_D.add_data(year=2020, month=1) # historical volume bids
inputs_manager.BIDDAYOFFER_D.add_data(year=2020, month=1) # historical price bids
inputs_manager.DISPATCHLOAD.add_data(year=2020, month=1) # unit operating limits
inputs_manager.DISPATCHREGIONSUM.add_data(year=2020, month=1) # historical demand
inputs_manager.INTERCONNECTOR.set_data(year=2020, month=1) # Regions connected by interconnector
inputs_manager.DISPATCHINTERCONNECTORRES.add_data(year=2020, month=1) # Interconnectors in each dispatch interval
inputs_manager.INTERCONNECTORCONSTRAINT.set_data(year=2020, month=1) # Interconnector data
inputs_manager.LOSSFACTORMODEL.set_data(year=2020, month=1) # Regional demand coefficients in loss functions
inputs_manager.LOSSMODEL.set_data(year=2020, month=1) # Break points for linear interpolation of loss functions
con.close()
# Connect to the database of historical inputs
con = sqlite3.connect('historical_inputs.db')
inputs_manager = hi.DBManager(connection=con)
# List for saving inputs to.
outputs = []
# Create and dispatch the spot market for each dispatch interval.
for interval in dispatch_intervals:
# Transform the historical input data into the format accepted by the Spot market class.
# Unit info.
DUDETAILSUMMARY = inputs_manager.DUDETAILSUMMARY.get_data(interval)
DUDETAILSUMMARY = DUDETAILSUMMARY[DUDETAILSUMMARY['DISPATCHTYPE'] == 'GENERATOR']
unit_info = hi.format_unit_info(DUDETAILSUMMARY)
# Unit bids.
BIDPEROFFER_D = inputs_manager.BIDPEROFFER_D.get_data(interval)
BIDPEROFFER_D = BIDPEROFFER_D[BIDPEROFFER_D['BIDTYPE'] == 'ENERGY']
volume_bids = hi.format_volume_bids(BIDPEROFFER_D)
BIDDAYOFFER_D = inputs_manager.BIDDAYOFFER_D.get_data(interval)
BIDDAYOFFER_D = BIDDAYOFFER_D[BIDDAYOFFER_D['BIDTYPE'] == 'ENERGY']
price_bids = hi.format_price_bids(BIDDAYOFFER_D)
# The unit operating conditions at the start of the historical interval.
DISPATCHLOAD = inputs_manager.DISPATCHLOAD.get_data(interval)
unit_limits = hi.determine_unit_limits(DISPATCHLOAD, BIDPEROFFER_D)
# Demand on regional basis.
DISPATCHREGIONSUM = inputs_manager.DISPATCHREGIONSUM.get_data(interval)
regional_demand = hi.format_regional_demand(DISPATCHREGIONSUM)
# Interconnector details.
INTERCONNECTOR = inputs_manager.INTERCONNECTOR.get_data()
INTERCONNECTORCONSTRAINT = inputs_manager.INTERCONNECTORCONSTRAINT.get_data(interval)
interconnectors = hi.format_interconnector_definitions(INTERCONNECTOR,
INTERCONNECTORCONSTRAINT)
interconnector_loss_coefficients = hi.format_interconnector_loss_coefficients(INTERCONNECTORCONSTRAINT)
LOSSFACTORMODEL = inputs_manager.LOSSFACTORMODEL.get_data(interval)
interconnector_demand_coefficients = hi.format_interconnector_loss_demand_coefficient(LOSSFACTORMODEL)
LOSSMODEL = inputs_manager.LOSSMODEL.get_data(interval)
interpolation_break_points = hi.format_interpolation_break_points(LOSSMODEL)
loss_functions = hi.create_loss_functions(interconnector_loss_coefficients, interconnector_demand_coefficients,
regional_demand.loc[:, ['region', 'loss_function_demand']])
# Create a market instance.
market = markets.Spot()
# Add generators to the market.
market.set_unit_info(unit_info.loc[:, ['unit', 'region']])
# Set the volume of each bid.
volume_bids = volume_bids[volume_bids['unit'].isin(list(unit_info['unit']))]
market.set_unit_volume_bids(volume_bids.loc[:, ['unit', '1', '2', '3', '4', '5',
'6', '7', '8', '9', '10']])
# Set prices of each bid.
price_bids = price_bids[price_bids['unit'].isin(list(unit_info['unit']))]
market.set_unit_price_bids(price_bids.loc[:, ['unit', '1', '2', '3', '4', '5',
'6', '7', '8', '9', '10']])
# Set unit operating limits.
market.set_unit_capacity_constraints(unit_limits.loc[:, ['unit', 'capacity']])
market.set_unit_ramp_up_constraints(unit_limits.loc[:, ['unit', 'initial_output', 'ramp_up_rate']])
market.set_unit_ramp_down_constraints(unit_limits.loc[:, ['unit', 'initial_output', 'ramp_down_rate']])
# Set regional demand.
market.set_demand_constraints(regional_demand.loc[:, ['region', 'demand']])
# Create the interconnectors.
market.set_interconnectors(interconnectors)
# Create loss functions on per interconnector basis.
market.set_interconnector_losses(loss_functions, interpolation_break_points)
# Calculate dispatch.
market.dispatch()
print('Dispatch for interval {} complete.'.format(interval))
# Save prices from this interval
prices = market.get_energy_prices()
prices['time'] = interval
outputs.append(prices)
con.close()
print(pd.concat(outputs))
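# The concatenated frame holds the regional energy price for every dispatch
# interval; with this version of nempy it is expected to contain columns
# along the lines of 'region' and 'price', plus the 'time' column added above.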
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
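# Reminder of the asfreq convention exercised throughout this class
# (illustrative, mirrors the assertions below): the second argument picks the
# sub-period used when down-converting, e.g.
#   Period(freq='A', year=2007).asfreq('M', 'S') -> Period('2007-01', 'M')
#   Period(freq='A', year=2007).asfreq('M', 'E') -> Period('2007-12', 'M')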
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time as timelib
def ShannonEntropy(P,normalize=False):
P=np.array(P)
if normalize:
P=P/P.sum()
P=P[P>1e-20]
return -np.sum(P*np.log2(P));
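# Quick sanity check (illustrative): a uniform distribution over two outcomes
# carries exactly 1 bit, whether given as probabilities or as raw counts.
#   >>> ShannonEntropy([0.5, 0.5])
#   1.0
#   >>> ShannonEntropy([10, 10], normalize=True)
#   1.0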
def proportional_abundance(weblog,field):
if weblog.shape[0]==0:
raise AssertionError('Empty weblog.')
histogram=weblog[field].value_counts()
pa_df=histogram/histogram.values.sum()
if abs(1.0-pa_df.values.sum())>1e-8:
raise AssertionError("ERROR: Proportional abundance distribution does not sum up to one.")
return pa_df.values,list(pa_df.index);
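# Illustrative usage: for a weblog whose 'requested_page' column holds
# ['a', 'a', 'b'], proportional_abundance(weblog, 'requested_page') returns
# (array([0.666..., 0.333...]), ['a', 'b']), i.e. the normalised histogram and
# the matching labels, which can be fed directly into ShannonEntropy.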
def session_draw_bis_melty(sessions_id, weblog,weblog_columns_dict):
"""
Draw the graph of the session whose session_id is given as input.
"""
from graph_tool.all import Graph
from graph_tool.all import graph_draw
session = weblog[weblog.session_id==sessions_id]
session = session.rename(index=str,columns = {weblog_columns_dict['requested_page_column']:'requested_page',\
weblog_columns_dict['referrer_page_column']:'referrer_page'})
s_pages = session[['requested_page','requested_external']]
s_pages_ref = session[['referrer_page','referrer_external']]
s_pages_ref = s_pages_ref.rename(index = str, columns = {'referrer_page':'requested_page','referrer_external':'requested_external'})
s_pages = s_pages.append(s_pages_ref)
s_pages.drop_duplicates(subset = 'requested_page',inplace=True)
g = Graph()
v = {}
halo = g.new_vertex_property("bool")
for row in s_pages.itertuples():
v[row.requested_page] = g.add_vertex()
if row.requested_external:
halo[v[row.requested_page]] = True
else:
halo[v[row.requested_page]] = False
session.apply(lambda x: g.add_edge(v[x.referrer_page], v[x.requested_page]), axis=1)
graph_draw(g, vertex_halo=halo, output="./_session"+str(sessions_id)+".png")
return;
def session_draw_bis(sessions_id, weblog,weblog_columns_dict):
"""
Draw the graph of the session whose session_id is given as input.
"""
from graph_tool.all import Graph
from graph_tool.all import graph_draw
session = weblog[weblog.session_id==sessions_id]
session = session.rename(index=str,columns = {weblog_columns_dict['requested_page_column']:'requested_page',\
weblog_columns_dict['referrer_page_column']:'referrer_page'})
s_pages = session['requested_page']
s_pages_ref = session['referrer_page']
#s_pages_ref = s_pages_ref.rename(index = str, columns = {'referrer_page':'requested_page'})
s_pages = s_pages.append(s_pages_ref)
s_pages.drop_duplicates(inplace=True)
g = Graph()
v = {}
for page in s_pages.values:
v[page] = g.add_vertex()
session.apply(lambda x: g.add_edge(v[x.referrer_page], v[x.requested_page]), axis=1)
graph_draw(g,output="../graph_dump/_session"+str(sessions_id)+".png")
return;
def mosaic(session_data, weblog, features, type_cluster, weblog_columns_dict,filename = None, verbose = False):
"""
Plot cluster mosaic: take the 5 most representative sessions of each cluster, draw the graph of each of them, show the timespan
and when requests are made along the timespan axis
Parameters
----------
session_data: pandas dataframe of sessions (one row per session)
weblog: pandas dataframe of requests
features: list of string, to calculate centroids of clusters
type_cluster: string
weblog_columns_dict: dict
Returns
-------
None
"""
if verbose:
start_time = timelib.time()
print("\n * Computing and plotting cluster mosaic ...")
try:
from graph_tool.all import Graph
from graph_tool.all import graph_draw
num_cluster = session_data[type_cluster].unique()
num_cluster.sort()
# compute centroids in order to select pertinent sessions
centroids = pd.DataFrame(columns=["cluster_id"] + features)
centroids["cluster_id"] = num_cluster
for dim in features:
mean = []
for cluster_id in num_cluster:
mean.append(session_data[session_data[type_cluster]==cluster_id][dim].mean())
centroids[dim] = mean
# select sessions representative of each cluster
start_time = timelib.time()
selected_sessions = {}
centroids["sum"] = centroids[features].sum(axis=1)
session_data["dist"] = session_data[type_cluster].map(pd.Series(data=centroids["sum"], index=centroids.cluster_id.values))
session_data["dist"] = session_data[features].sum(axis=1) - session_data["dist"]
session_data["dist"] = np.sqrt((session_data["dist"] * session_data["dist"]))
N_max_sessions=5 # Sessions to plot per cluster
for cluster_id in num_cluster:
selected_sessions[cluster_id] = list(session_data[session_data[type_cluster]==cluster_id].sort_values(["dist"]).session_id.values)[:N_max_sessions]
cluster_sessions = session_data[session_data[type_cluster] == cluster_id].session_id.unique()
cluster_weblog = weblog[weblog.session_id.isin(cluster_sessions)]
# Retrieving data for the N sessions sample
session_data_df = pd.DataFrame(columns=['id','start','end','timespan','span_sec'])
sessions=selected_sessions[cluster_id]
session_data_df['id']=sessions # The number of the sessions to plot for this cluster
session_start=cluster_weblog[[weblog_columns_dict['timestamp_column'],'session_id']].groupby('session_id').min()
session_end=cluster_weblog[[weblog_columns_dict['timestamp_column'],'session_id']].groupby('session_id').max()
session_data_df['start']=session_data_df.id.map(pd.Series(data=session_start\
[weblog_columns_dict['timestamp_column']].values,index=session_start.index))
session_data_df['end']=session_data_df.id.map(pd.Series(data=session_end\
[weblog_columns_dict['timestamp_column']].values,index=session_end.index))
session_data_df['timespan']=session_data_df.apply(lambda row: pd.Timedelta( | pd.Timestamp(row.end) | pandas.Timestamp |
from django.shortcuts import render_to_response
from django.utils.cache import patch_response_headers
from django.http import JsonResponse
from core.views import initRequest, login_customrequired
from core.utils import is_json_request
from core.iDDS.useconstants import SubstitleValue
from core.iDDS.rawsqlquery import getRequests, getTransforms, getWorkFlowProgressItemized
from core.iDDS.algorithms import generate_requests_summary, parse_request
from core.libs.exlib import lower_dicts_in_list
from core.libs.DateEncoder import DateEncoder
import pandas as pd
CACHE_TIMEOUT = 20
OI_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
subtitleValue = SubstitleValue()
def prepare_requests_summary(workflows):
summary = {'status': {}, 'username': {}}
"""
completion
age
"""
for workflow in workflows:
summary['status'][workflow['r_status']] = summary['status'].get(workflow['r_status'], 0) + 1
if workflow['username'] == '':
workflow['username'] = "Not set"
summary['username'][workflow['username']] = summary['username'].get(workflow['username'], 0) + 1
return summary
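# Illustrative result shape (hypothetical labels and counts): for three
# workflows the summary might look like
#   {'status': {'Finished': 2, 'Transforming': 1},
#    'username': {'Not set': 1, 'someuser': 2}}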
def get_workflow_progress_data(request_params, **kwargs):
workflows_items = getWorkFlowProgressItemized(request_params, **kwargs)
workflows_items = pd.DataFrame(workflows_items)
workflows_semi_grouped = []
if not workflows_items.empty:
workflows_items.USERNAME.fillna(value='', inplace=True)
workflows_pd = workflows_items.astype({"WORKLOAD_ID":str}).astype({"R_CREATED_AT":str}).groupby(['REQUEST_ID', 'R_STATUS', 'P_STATUS', 'R_NAME', 'USERNAME']).agg(
PROCESSING_FILES_SUM=pd.NamedAgg(column="PROCESSING_FILES", aggfunc="sum"),
PROCESSED_FILES_SUM=pd.NamedAgg(column="PROCESSED_FILES", aggfunc="sum"),
TOTAL_FILES=pd.NamedAgg(column="TOTAL_FILES", aggfunc="sum"),
P_STATUS_COUNT=pd.NamedAgg(column="P_STATUS", aggfunc="count")
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
# Functions that yield mismaching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
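# For reference (illustrative): _make_func_use_binop1('+') builds and returns
# a function compiled from the source
#   def test_impl(A, B):
#       return A + B
# which the tests below wrap with hpat.jit and compare against plain pandas.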
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = | pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg']) | pandas.Series |
import pandas
from msdss_models_api.models import Model
def create_init_method(can_input=True, can_output=True, can_update=True):
"""
Create model init method for scikit-learn models to be compatible with :class:`msdss_models_api:msdss_models_api.models.Model`.
See :class:`msdss_models_api:msdss_models_api.models.Model`.
Parameters
----------
can_input : bool
Whether the method ``.input`` is defined and available. This is useful for controlling route requests in an API.
can_output : bool
Whether the method ``.output`` is defined and available. This is useful for controlling route requests in an API.
can_update : bool
Whether the method ``.update`` is defined and available. This is useful for controlling route requests in an API.
Author
------
<NAME> <<EMAIL>>
Example
-------
.. jupyter-execute::
from msdss_models_sklearn.tools import *
        init = create_init_method()
"""
def init(self, can_input=can_input, can_output=can_output, can_update=can_update, *args, **kwargs):
Model.__init__(self, can_input=can_input, can_output=can_output, can_update=can_update, *args, **kwargs)
return init
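# Usage sketch (an assumption about how these factories are combined; not confirmed by
# the msdss_models_api docs): the generated functions are typically attached to a Model
# subclass, e.g.
# class SklearnLinearRegression(Model):
#     __init__ = create_init_method()
#     input = create_input_method(LinearRegression)  # assumes sklearn's LinearRegression is imported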
def create_input_method(model):
"""
Create model input method for scikit-learn models to be compatible with :class:`msdss_models_api:msdss_models_api.models.Model`.
See :meth:`msdss_models_api:msdss_models_api.models.Model.input`.
Parameters
----------
model : class
Scikit-learn model class to create machine learning models.
Author
------
<NAME> <<EMAIL>>
Example
-------
.. jupyter-execute::
from msdss_models_sklearn.tools import *
from sklearn.linear_model import LinearRegression
input = create_input_method(LinearRegression)
"""
def input(self, data, x=None, y=None, _fit={}, *args, **kwargs):
# (create_input_method_vars) Set default vars
x = x if x else self.settings['x'] if 'x' in self.settings else x
y = y if y else self.settings['y'] if 'y' in self.settings else y
# (create_input_method_data) Format data for model instance input
data = | pandas.DataFrame(data) | pandas.DataFrame |
'''
# Section 1: Business Understanding
For this project, I was interested in using **Seattle Airbnb Data from 2016** to better understand:
1. What kind of accommodations may I book?
2. Which period of the year has the highest number of listings? By how much do prices spike?
3. What are the cheapest and most expensive neighborhoods?
4. What are the factors that most impact the price?
You can find the full set of files related to these analyses at: https://www.kaggle.com/airbnb/seattle/data.
Also, check out my medium post at: https://matsuch.medium.com/can-you-guess-the-best-time-to-visit-seattle-24025ab7da70
'''
## Opening libraries
#import algebra linear and data manipulation
import numpy as np
import pandas as pd
from collections import defaultdict
#Plot
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.patches as mpatches
#Machine Learning
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
## Information gathering
#opening dataframes
df_review = pd.read_csv('./reviews.csv')
df_listings = pd.read_csv('./listings.csv')
df_calendar = pd.read_csv('./calendar.csv')
# Section 2: Data Understanding
#==============================================================================
#Splitting dataframes into categories - host, review, quarto (room), bairro (neighbourhood)
#==============================================================================
host = df_listings[['host_id','host_is_superhost', 'host_response_rate', 'host_response_time']]
review = df_listings[['id','number_of_reviews', 'review_scores_rating', 'review_scores_accuracy', 'review_scores_cleanliness',
'review_scores_checkin', 'review_scores_communication', 'review_scores_location', 'review_scores_value']]
quarto = df_listings[['room_type', 'bathrooms', 'bedrooms', 'beds', 'bed_type', 'accommodates','property_type']]
bairro = df_listings[['neighbourhood', 'neighbourhood_cleansed','neighbourhood_group_cleansed']]
#==============================================================
#Data treatment - handle NaN values and convert object columns to floats
#==============================================================
#host
print('before % of null values : \n', host.isnull().sum()/len(host)*100, '\n') #% NaN values
#transform the host_is_superhost in float
host['host_is_superhost'] = host['host_is_superhost'].map({'f':1,'t':0}) #change f and t for 1 and 0
#Transform the host_response_rate column to str, strip '%' and convert to float so the mean can be computed
host['host_response_rate_num'] = host['host_response_rate'].astype(str)
host['host_response_rate_num'] = host['host_response_rate_num'].str.replace("%", "").astype("float")
#Map host_response_time categories to hours (float); missing values are filled with the mean below
host['host_response_time'] = host['host_response_time'].map({'within a few hours':6,
'within an hour':1,
'within a day':24,
'a few days or more':48})
#Below I tried to fill the NaN values in host_response_rate and host_response_time based on each host's behaviour:
#since it is the same host, I assumed the response figures would be similar for each one.
#However, the results were not as expected.
#fill host activity by its mean
host['host_response_rate_num'] = host['host_response_rate_num'].fillna(host.groupby(['host_id'])
['host_response_rate_num'].transform('mean'))
host['host_response_time'] = host['host_response_time'].fillna(host.groupby(['host_id'])
['host_response_time'].transform('mean'))
print('after % of null values : \n', host.isnull().sum()/len(host)*100, '\n') #% NaN values
#Tried to fill the NaN values with the mean; since that did not work, I decided to drop those rows to have a complete dataset
host = host.drop(['host_response_rate'], axis = 1) #drop the old response rate column
hostnew = host.dropna() #drop the remaining nan values, since the new df has 86% of the original size
print('Size comparison between the old and new host df :', len(hostnew)/len(host), '\n')
#review
#fill all columns with the mean - new approach
'''
review['review_scores_value'] = review.groupby('id')['review_scores_value'].transform(lambda x: x.fillna(x.mean()))
review['review_scores_rating'] = review.groupby('id')['review_scores_rating'].transform(lambda x: x.fillna(x.mean()))
review['review_scores_accuracy'] = review.groupby('id')['review_scores_accuracy'].transform(lambda x: x.fillna(x.mean()))
review['review_scores_cleanliness'] = review.groupby('id')['review_scores_cleanliness'].transform(lambda x: x.fillna(x.mean()))
review['review_scores_checkin'] = review.groupby('id')['review_scores_checkin'].transform(lambda x: x.fillna(x.mean()))
review['review_scores_communication'] = review.groupby('id')['review_scores_communication'].transform(lambda x: x.fillna(x.mean()))
review['review_scores_location'] = review.groupby('id')['review_scores_location'].transform(lambda x: x.fillna(x.mean()))
#NEED TO FIND A FASTER WAY TO PERFORM THIS
'''
print('Sum of nan values per column: \n',review.isnull().sum())
print('number of listings with no reviews: ', (review['number_of_reviews'] == 0).sum())
#quarto
print(quarto.isnull().sum()) #count null columns
quartonew=quarto.dropna() #drop null values
#bairro
print(bairro.isnull().sum()) #count null columns
bairronew=bairro.dropna() #drop null values (less than 10% of rows) - most of the analysis here needs neighbourhood information
# Section 3: Data Preparation
#===============
#Data treatment
#==============
#creat new df
df_novo = pd.concat((review, quartonew, hostnew, bairronew), axis=1)
#split date information in year and month, drop the original date column
df_calendar['date'] = pd.to_datetime(df_calendar['date'], format= '%Y/%m/%d') #set date in datetime
df_calendar['ano'] = df_calendar['date'].dt.year #create a year column
df_calendar['mês'] = df_calendar['date'].dt.month #create a month column
df_calendar.drop(['date'], axis=1, inplace=True) #drop the old date column
df_novo.rename(index=str, columns={'id': 'listing_id'}, inplace=True) #change the 'id' column name to be the same as the calendar
df = | pd.merge(df_calendar, df_novo, on='listing_id') | pandas.merge |
import pandas as pd
import numpy as np
from scipy import signal
from eeg_globals import *
def cut_signal(eeg_df, dic_cut_opts):
"""
    Cut the signal into small overlapping windows
Return:
A new dataframe
"""
sample_win = int(PSF * dic_cut_opts['window'])
sample_over = int(PSF * dic_cut_opts['overlap'])
sample_stride = sample_win - sample_over
    # Add an 'observation' column to the data, numbered within each subject/test/phase group
print('split data into observations')
eeg_window = []
for subject in eeg_df.subject.unique():
print(subject)
for test in eeg_df.test.unique():
print(' ' + str(test))
for phase in eeg_df.phase.unique():
print(' ' + str(phase))
df = eeg_df.loc[(eeg_df.subject==subject) &
(eeg_df.test==test) &
(eeg_df.phase==phase)].copy()
df = df.reset_index(drop=True)
n_intervals = int(np.floor(( df.shape[0] - sample_win ) / sample_stride) + 1)
for k in range(n_intervals):
data = df.iloc[k * sample_stride : k * sample_stride + sample_win].copy()
data = data.reset_index(drop=True)
data['observation'] = k + 1
eeg_window.append(data)
del data
del df
eeg_window = pd.concat(eeg_window, axis=0, ignore_index=True)
print(eeg_window.shape)
return eeg_window
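# Usage sketch (hypothetical parameters; PSF is defined in eeg_globals): with PSF = 128 Hz,
# a 2 s window and 1 s overlap give sample_win = 256, sample_over = 128 and
# sample_stride = 128, so consecutive observations share 50% of their samples.
# eeg_windows = cut_signal(eeg_df, {'window': 2, 'overlap': 1})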
def cut_signal_simulator(eeg_df, dic_cut):
labels = list(eeg_df.columns)
sample_win = int(PSF * dic_cut['window'])
sample_over = int(PSF * dic_cut['overlap'])
sample_stride = sample_win - sample_over
    # Add an 'observation' column to the data, numbered within each subject/flight/phase group
print('split data into observations')
eeg_window = []
for subject in eeg_df.subject.unique():
print(subject)
for flight_number in eeg_df.flight_number.unique():
print(' ' + str(flight_number))
for phase in eeg_df.phase.unique():
print(' ' + str(phase))
df = eeg_df.loc[(eeg_df.subject==subject) &
(eeg_df.flight_number==flight_number) &
(eeg_df.phase==phase)].copy()
df = df.reset_index(drop=True)
n_intervals = int(np.floor(( df.shape[0] - sample_win ) / sample_stride) + 1)
for k in range(n_intervals):
data = df.iloc[k * sample_stride : k * sample_stride + sample_win].copy()
data = data.reset_index(drop=True)
data['observation'] = k + 1
eeg_window.append(data)
del data
del df
eeg_window = | pd.concat(eeg_window, axis=0, ignore_index=True) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[79]:
'''
https://github.com/bbmusa
'''
from pandas_datareader import data as pdr
from yahoo_fin import stock_info as si
# In[2]:
import pandas as pd
# In[3]:
import numpy as np
# In[7]:
tickers = si.tickers_nifty50()
# In[17]:
tickers.remove('MM.NS')
# In[72]:
start_date = '2021-11-10'
end_date = '2022-02-14'
# In[73]:
'''
<NAME>'s RSI indicator simply finds blue-chip gold mines for you.
'''
def download_all_stock_data(all_stock_symbols, start_date, end_date):
def download_stock_data(single_symbol):
print(' Downloading '+single_symbol+' data ')
# try:
tmp1=pdr.get_data_yahoo(single_symbol,start=start_date,end=end_date)
# except KeyError:
# pass
return(tmp1)
downloaded_data=map(download_stock_data,all_stock_symbols)
return(pd.concat(downloaded_data,keys=all_stock_symbols, names=['Ticker','Date']))
# In[74]:
stock_data=download_all_stock_data(tickers, start_date, end_date)
fileName = 'downloadedData.pkl'
stock_data.to_pickle(fileName)
# In[80]:
'''
RSI = 100 - 100 / (1 + RS)
RS = average gain / average loss
This RSI is based on 14 periods, which means:
+ first average gain = sum of gains over the 14 periods / 14
+ first average loss = sum of losses over the 14 periods / 14
+ subsequent averages = ((previous average * 13) + current gain or loss) / 14
'''
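# Worked example (hypothetical closes 10, 11, 10.5 with a 2-period RSI): the price
# differences are +1.0 and -0.5, so RS = (1.0 / 2) / (0.5 / 2) = 2.0 and
# RSI = 100 - 100 / (1 + 2.0) ≈ 66.7.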
def compute_RSI(data,period_RSI):
differencePrice = data['Close'].diff()
differencePriceValues=differencePrice.values
positive_differences=0
negative_differences=0
current_average_positive=0
current_average_negative=0
price_index=0
RSI=[]
for difference in differencePriceValues[1:]:
if difference>0:
positive_difference=difference
negative_difference=0
if difference<0:
negative_difference=np.abs(difference)
positive_difference=0
if difference==0:
negative_difference=0
positive_difference=0
if (price_index<period_RSI):
current_average_positive=current_average_positive+(1/period_RSI)*positive_difference
current_average_negative=current_average_negative+(1/period_RSI)*negative_difference
if(price_index==(period_RSI-1)):
if current_average_negative!=0:
RSI.append(100 - 100/(1+(current_average_positive/current_average_negative)))
else:
RSI.append(100)
else:
current_average_positive=((period_RSI-1)*current_average_positive+positive_difference)/(period_RSI)
current_average_negative=((period_RSI-1)*current_average_negative+negative_difference)/(period_RSI)
if current_average_negative!=0:
RSI.append(100 - 100/(1+(current_average_positive/current_average_negative)))
else:
RSI.append(100)
price_index=price_index+1
RSI_series= | pd.Series(data=RSI,index=differencePrice.index[period_RSI:]) | pandas.Series |
from collections import defaultdict
from subprocess import Popen, PIPE
from multiprocessing import Process
from loguru import logger
import pandas as pd
import signal
import os
class ProcessInfo:
"""
class for processes' information
"""
def __init__(self,uuid,pid,descrition,used_memory):
self.pid = int(pid)
self.uuid = uuid
self.descrition = descrition
self.used_memory = int(used_memory)
class GPU:
"""
class for per GPU device's information
"""
def __init__(self, GPU_type, GPU_index,uuid, utilizationGPU, memoryTotal, memoryUsed, memoryFree, driver):
self.GPU_type = GPU_type
self.GPU_index = GPU_index
self.uuid = uuid
self.utilizationGPU = utilizationGPU
self.memoryTotal = float(memoryTotal)
self.memoryUsed = float(memoryUsed)
self.memoryFree = float(memoryFree)
self.driver = driver
self.task = None
def GPU_info(self):
return [self.GPU_type,self.GPU_index,self.utilizationGPU,self.memoryTotal,self.memoryUsed,self.memoryFree,self.memoryUsed/self.memoryTotal,len(self.task)]
## get task information from device
def tasks_info(self):
if len(self.task) == 0:
return None
else:
col = ['device','pid','descrition','used_memory']
df = pd.DataFrame(columns=col)
for item in self.task:
df.loc[len(df)] = [self.GPU_index,item.pid,item.descrition,item.used_memory]
return df
## kill process
def kill_process(self):
try:
pids = self.tasks_info()['pid'].values
print(pids)
except:
print(f"there are no tasks on device {self.GPU_index}!")
return
for item in pids:
os.kill(item, signal.SIGKILL)
print("You kill the processes ", item, "successfully!")
class GPUgo():
"""
class for analysis the device's information
"""
def __init__(self,):
self.GPUs = self.GetGpuInfo()
def MyGpuInfo(self):
col = ['device_type','device_id','utilizationGPU(%)','memoryTotal(MB)','memoryUsed(MB)','memoryFree(MB)','memoryusedPercent(%)','task_num']
df = pd.DataFrame(columns=col)
for gpu in self.GPUs:
df.loc[len(df)] = gpu.GPU_info()
return df
def ShowmyGpuInfo(self):
print('====='*25)
df = self.MyGpuInfo()
print(df.to_string(index=False))
print('\n')
print('-----'*25)
print(self.GpuProcessInfo())
print('====='*25)
def GetGpuInfo(self):
GPUs= []
GPU_info = Popen(["nvidia-smi","--query-gpu=name,index,uuid,utilization.gpu,memory.total,memory.used,memory.free,driver_version", "--format=csv,noheader,nounits"], stdout=PIPE)
stdout, stderror = GPU_info.communicate()
GPU_info = stdout.decode('UTF-8').strip().split('\n')
GPU_info = [i.split(', ' ) for i in GPU_info]
for i in range(len(GPU_info)):
GPUs.append(GPU(*GPU_info[i]))
process_info = Popen(["nvidia-smi","--query-compute-apps=gpu_uuid,pid,process_name,used_memory", "--format=csv,noheader,nounits"], stdout=PIPE)
stdout, stderror = process_info.communicate()
process_info = stdout.decode('UTF-8').strip().split('\n')
process_info = [i.split(', ') for i in process_info]
for gpu in GPUs:
processes = []
for item in process_info:
if gpu.uuid == item[0]:
processes.append(ProcessInfo(*item))
gpu.task = processes
return GPUs
def GpuProcessInfo(self):
col = ['device','pid','descrition','used_memory']
df = | pd.DataFrame(columns=col) | pandas.DataFrame |
from sklearn.cluster import KMeans
from scipy.stats import zscore
import h5py as h5
import numpy as np
import pandas as pd
import random
from typing import List
import sys
from prismx.utils import quantile_normalize, normalize
np.seterr(divide='ignore', invalid='ignore')
def calculateCorrelation(h5file: str, clustering: pd.DataFrame, geneidx: List[int], clusterID: str="global", maxSampleCount: int=2000) -> List:
'''
Returns correlation matrix for specified samples
Parameters:
h5file (string): path to expression h5 file
clustering (pandas DataFrame): sample mappings
geneidx (array of int): array of gene indices
Returns:
            correlation coefficients (pandas DataFrame)
'''
f = h5.File(h5file, 'r')
samples = f['meta/samples/geo_accession']
genes = f['meta/genes/genes']
if clusterID == "global":
samplesidx = sorted(random.sample(range(len(samples)), min(maxSampleCount, len(samples))))
else:
samplesidx = np.where(clustering.loc[:,"clusterID"] == int(clusterID))[0]
if maxSampleCount > 2: samplesidx = sorted(random.sample(set(samplesidx), min(len(samplesidx), maxSampleCount)))
exp = f['data/expression'][:,samplesidx][geneidx,:]
qq = normalize(exp, transpose=False)
exp = 0
cc = np.corrcoef(qq)
cc = np.nan_to_num(cc)
qq = 0
correlation = pd.DataFrame(cc, index=genes[geneidx], columns=genes[geneidx], dtype=np.float16)
correlation.index = [x.upper() for x in genes[geneidx]]
correlation.columns = [x.upper() for x in genes[geneidx]]
f.close()
cc = 0
np.fill_diagonal(correlation.to_numpy(), float('nan'))
return(correlation)
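# Usage sketch (hypothetical file name and gene count): correlation matrix for cluster "3",
# capped at 1000 randomly drawn samples from that cluster:
# corr = calculateCorrelation("expression.h5", clustering, list(range(20000)),
#                             clusterID="3", maxSampleCount=1000)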
def createClustering(h5file: str, geneidx: List[int], geneCount: int=500, clusterCount: int=50, deterministic: bool=True) -> pd.DataFrame:
'''
Returns cluster association for all samples in input expression h5 file
Parameters:
h5file (string): path to expression h5 file
            geneidx (array of int): indices of genes
            geneCount (int): count of genes used for clustering
clusterCount (int): number of clusters
Returns:
sample cluster mapping (pandas.DataFrame)
'''
if deterministic:
random.seed(42)
f = h5.File(h5file, 'r')
expression = f['data/expression']
samples = list(f['meta/samples/geo_accession'])
genes = sorted(random.sample(geneidx, geneCount))
exp = expression[genes, :]
f.close()
qq = normalize(exp, transpose=False)
qq = pd.DataFrame(zscore(qq, axis=1)).fillna(0)
exp = 0
kmeans = KMeans(n_clusters=clusterCount, random_state=42).fit(qq.transpose())
qq = 0 # keep memory footprint low
clustering = kmeans.labels_
kmeans = 0 # keep memory footprint low
clusterMapping = | pd.DataFrame({'sampleID': samples, 'clusterID': clustering}, index = samples, columns=["sampleID", "clusterID"]) | pandas.DataFrame |
import logging
import multiprocessing
import os
from collections import defaultdict
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from sklearn.model_selection import KFold, StratifiedKFold, RepeatedKFold, RepeatedStratifiedKFold, train_test_split
from .constants import BINARY, REGRESSION, SOFTCLASS
logger = logging.getLogger(__name__)
def get_pred_from_proba(y_pred_proba, problem_type=BINARY):
if problem_type == BINARY:
y_pred = [1 if pred >= 0.5 else 0 for pred in y_pred_proba]
elif problem_type == REGRESSION:
y_pred = y_pred_proba
else:
y_pred = np.argmax(y_pred_proba, axis=1)
return y_pred
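# Example: for BINARY problems a probability of at least 0.5 maps to class 1, so
# get_pred_from_proba(np.array([0.2, 0.7, 0.5])) returns [0, 1, 1]; for multiclass input
# the argmax over the class axis is taken instead.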
def generate_kfold(X, y=None, n_splits=5, random_state=0, stratified=False, n_repeats=1):
if stratified and (y is not None):
if n_repeats > 1:
kf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=random_state)
else:
kf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state)
kf.get_n_splits(X, y)
return [[train_index, test_index] for train_index, test_index in kf.split(X, y)]
else:
if n_repeats > 1:
kf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=random_state)
else:
kf = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)
kf.get_n_splits(X)
return [[train_index, test_index] for train_index, test_index in kf.split(X)]
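# Example (toy data): 5 stratified folds over a balanced binary target; each element of
# the returned list is a [train_index, test_index] pair of index arrays.
# folds = generate_kfold(pd.DataFrame({'x': range(10)}), y=pd.Series([0, 1] * 5),
#                        n_splits=5, stratified=True)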
def generate_train_test_split(X: DataFrame, y: Series, problem_type: str, test_size: float = 0.1, random_state=42) -> (DataFrame, DataFrame, Series, Series):
if (test_size <= 0.0) or (test_size >= 1.0):
raise ValueError("fraction of data to hold-out must be specified between 0 and 1")
if problem_type in [REGRESSION, SOFTCLASS]:
stratify = None
else:
stratify = y
# TODO: Enable stratified split when y class would result in 0 samples in test.
# One approach: extract low frequency classes from X/y, add back (1-test_size)% to X_train, y_train, rest to X_test
# Essentially stratify the high frequency classes, random the low frequency (While ensuring at least 1 example stays for each low frequency in train!)
# Alternatively, don't test low frequency at all, trust it to work in train set. Risky, but highest quality for predictions.
X_train, X_test, y_train, y_test = train_test_split(X, y.values, test_size=test_size, shuffle=True, random_state=random_state, stratify=stratify)
if problem_type != SOFTCLASS:
y_train = pd.Series(y_train, index=X_train.index)
y_test = | pd.Series(y_test, index=X_test.index) | pandas.Series |
# -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
        # a mix of nan and None results in 'mixed'
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
class TestNumberScalar(object):
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64('2011-01-01'))
assert not is_number(Timestamp('2011-01-01'))
assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta('1 days'))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, 'D'))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(np.bool(False))
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool('x')
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, 'D'))
assert not is_bool(Timedelta('1 days'))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(np.bool(False))
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer('x')
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta('1 days'))
# questionable
assert is_integer(np.timedelta64(1, 'D'))
def test_is_float(self):
assert is_float(1.1)
assert is_float(np.float64(1.1))
assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
assert not is_float(1 + 3j)
assert not is_float(np.bool(False))
assert not is_float(np.bool_(False))
assert not is_float(np.int64(1))
assert not is_float(np.complex128(1 + 3j))
assert not is_float(None)
assert not is_float('x')
assert not is_float(datetime(2011, 1, 1))
assert not is_float(np.datetime64('2011-01-01'))
assert not is_float(Timestamp('2011-01-01'))
assert not is_float(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_float(timedelta(1000))
assert not is_float(np.timedelta64(1, 'D'))
assert not is_float(Timedelta('1 days'))
def test_is_datetime_dtypes(self):
ts = pd.date_range('20130101', periods=3)
tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')
assert is_datetime64_dtype('datetime64')
assert is_datetime64_dtype('datetime64[ns]')
assert is_datetime64_dtype(ts)
assert not is_datetime64_dtype(tsa)
assert not is_datetime64_ns_dtype('datetime64')
assert is_datetime64_ns_dtype('datetime64[ns]')
assert is_datetime64_ns_dtype(ts)
assert is_datetime64_ns_dtype(tsa)
assert is_datetime64_any_dtype('datetime64')
assert is_datetime64_any_dtype('datetime64[ns]')
assert is_datetime64_any_dtype(ts)
assert is_datetime64_any_dtype(tsa)
assert not is_datetime64tz_dtype('datetime64')
assert not is_datetime64tz_dtype('datetime64[ns]')
assert not is_datetime64tz_dtype(ts)
assert is_datetime64tz_dtype(tsa)
for tz in ['US/Eastern', 'UTC']:
dtype = 'datetime64[ns, {}]'.format(tz)
assert not is_datetime64_dtype(dtype)
assert is_datetime64tz_dtype(dtype)
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_any_dtype(dtype)
def test_is_timedelta(self):
assert is_timedelta64_dtype('timedelta64')
assert is_timedelta64_dtype('timedelta64[ns]')
assert not is_timedelta64_ns_dtype('timedelta64')
assert is_timedelta64_ns_dtype('timedelta64[ns]')
tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64[ns]')
assert is_timedelta64_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]'))
# Conversion to Int64Index:
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64'))
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]'))
class TestIsScalar(object):
def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
assert is_scalar(Number())
assert is_scalar(Fraction())
assert is_scalar(0.)
assert is_scalar(np.nan)
assert is_scalar('foobar')
assert is_scalar(b'foobar')
assert is_scalar(u('efoobar'))
assert is_scalar(datetime(2014, 1, 1))
assert is_scalar(date(2014, 1, 1))
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
def test_is_scalar_builtin_nonscalars(self):
assert not is_scalar({})
assert not is_scalar([])
assert not is_scalar([1])
assert not is_scalar(())
assert not is_scalar((1, ))
assert not is_scalar(slice(None))
assert not is_scalar(Ellipsis)
def test_is_scalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.))
assert is_scalar(np.int32(1))
assert is_scalar(np.object_('foobar'))
assert is_scalar(np.str_('foobar'))
assert is_scalar(np.unicode_(u('foobar')))
assert is_scalar(np.bytes_(b'foobar'))
assert is_scalar(np.datetime64('2014-01-01'))
assert is_scalar(np.timedelta64(1, 'h'))
def test_is_scalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
assert not is_scalar(zerodim)
assert is_scalar(lib.item_from_zerodim(zerodim))
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scalar_numpy_arrays(self):
assert not is_scalar(np.array([]))
assert not is_scalar(np.array([[]]))
assert not is_scalar(np.matrix('1; 2'))
def test_is_scalar_pandas_scalars(self):
assert is_scalar(Timestamp('2014-01-01'))
assert is_scalar(Timedelta(hours=1))
assert is_scalar( | Period('2014-01-01') | pandas.Period |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import *
from keras.models import load_model
import matplotlib.pyplot as plt
#################################################################
### Generate Data ###############################################
#################################################################
# generate training data
x = np.linspace(0.0,2*np.pi,20)
y = np.sin(x)
# save training data to file
data = np.vstack((x,y)).T
np.savetxt('train_data.csv',data,header='x,y',comments='',delimiter=',')
# generate test data
x = np.linspace(0.0,2*np.pi,100)
y = np.sin(x)
# save test data to file
data = np.vstack((x,y)).T
np.savetxt('test_data.csv',data,header='x,y',comments='',delimiter=',')
#################################################################
### Scale data ##################################################
#################################################################
# load training and test data with pandas
train_df = pd.read_csv('train_data.csv')
test_df = pd.read_csv('test_data.csv')
# scale values to 0 to 1 for the ANN to work well
s = MinMaxScaler(feature_range=(0,1))
# scale training and test data
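# (fit_transform learns the per-column min/max from the training data only;
#  transform then reuses those statistics so the test data is scaled consistently)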
sc_train = s.fit_transform(train_df)
sc_test = s.transform(test_df)
# print scaling adjustments
print('Scalar multipliers')
print(s.scale_)
print('Scalar minimum')
print(s.min_)
# convert scaled values back to dataframe
sc_train_df = pd.DataFrame(sc_train, columns=train_df.columns.values)
sc_test_df = pd.DataFrame(sc_test, columns=test_df.columns.values)
# save scaled values to CSV files
sc_train_df.to_csv('train_scaled.csv', index=False)
sc_test_df.to_csv('test_scaled.csv', index=False)
#################################################################
### Train model #################################################
#################################################################
# create neural network model
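# architecture: Dense(1) input -> Dense(2) -> Dense(2, tanh) -> Dense(2) -> Dense(1) output;
# the tanh layer is the only nonlinearity, which is what lets the network approximate sin(x)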
model = Sequential()
model.add(Dense(1, input_dim=1, activation='linear'))
model.add(Dense(2, activation='linear'))
model.add(Dense(2, activation='tanh'))
model.add(Dense(2, activation='linear'))
model.add(Dense(1, activation='linear'))
model.compile(loss="mean_squared_error", optimizer="adam")
# load training data
train_df = pd.read_csv("train_scaled.csv")
X1 = train_df.drop('y', axis=1).values
Y1 = train_df[['y']].values
# train the model
model.fit(X1,Y1,epochs=5000,verbose=0,shuffle=True)
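# with only 20 training samples, 5000 epochs are cheap; verbose=0 suppresses per-epoch logging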
# Save the model to hard drive
#model.save('model.h5')
#################################################################
### Test model ##################################################
#################################################################
# Load the model from hard drive
#model = load_model('model.h5')
# load test data
test_df = | pd.read_csv("test_scaled.csv") | pandas.read_csv |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
        # Test that a zero-dimensional array is unboxed when used in a comparison
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
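        # tm.box_expected wraps the data in the parametrized container
        # (Index/Series/DataFrame/array); get_upcast_box picks the container
        # the comparison result is expected to come back in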
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
        lhs, rhs = pair
        if reverse:
            # test with the lhs / rhs operands switched
            lhs, rhs = rhs, lhs
        left = Series(lhs, dtype=dtype)
        right = box(rhs, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan when compared against a regular scalar
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
    # Raising in __eq__ will fall back to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
        # regardless of tz, we expect these comparisons to be valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
        # when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
# datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
# timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
# these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
# ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz))
import requests as req
import pandas as pd
import datetime
import matplotlib.pyplot as plt
trades_df = pd.read_csv('trades.csv')
trades_df['DateOfTrade']= pd.to_datetime(trades_df['DateOfTrade'])
trades_df = trades_df.sort_values(by='DateOfTrade')
comp_columns = ['StartDate','EndDate','CCY','Amount']
composition = pd.DataFrame(columns=comp_columns)
for trade in trades_df.itertuples():
date_of_trade = getattr(trade,'DateOfTrade')
ccy = getattr(trade,'CCY')
amount = getattr(trade,'Amount')
if ccy not in composition['CCY'].to_list():
start_date = date_of_trade
end_date = pd.datetime(2050,12,31)
new_trade_df = pd.DataFrame({'StartDate':[start_date],'EndDate':[end_date],'CCY':[ccy],'Amount':[amount]})
composition = pd.concat([composition,new_trade_df])
else:
current_ccy = (composition['EndDate'] == '2050-12-31') & (composition['CCY'] == ccy)
if date_of_trade > composition.loc[current_ccy]['StartDate'][0]:
new_amount = composition.loc[current_ccy]['Amount'][0] + amount
if new_amount==0: #deletion
composition.loc[current_ccy, 'EndDate'] = date_of_trade
else:
composition.loc[current_ccy,'EndDate'] = date_of_trade
new_trade_df = pd.DataFrame({'StartDate': [date_of_trade], 'EndDate': [end_date], 'CCY': [ccy], 'Amount': [new_amount]})
composition = pd.concat([composition, new_trade_df])
import pandas as pd
import numpy as np
# the following dict maps the columns header in the raw LIDAR files to the desired new column names
def generateKeys():
"""
generates and returns a dictionary containing the original column names from the
LIDAR file as values and the currently used column names as corresponding keys
ws_1 : Speed Value.1
dir_1 : Direction Value.1
h_1 : Node RT01 Lidar Height
"""
keys = {"ws_0" : "Speed Value", "dir_0" : "Direction Value", "h_0" : "Node RT00 Lidar Height"}
for i in range(1, 11):
keys.update({"ws_{}".format(i) : "Speed Value.{}".format(i),
"dir_{}".format(i) : "Direction Value.{}".format(i),
"h_{}".format(i) : "Node RT{:02d} Lidar Height".format(i+1),
})
return keys
# function to generate a new time index
def generateDateTime(df):
"""
combines date and time from the LIDAR file and returns a new datetime index
"""
tempD = df.date.apply(lambda x: "-".join(reversed(x.split("/"))))
return tempD + " " + df.time
def correctHeading(x, headingCorrection):
"""
corrects a single directional value (in degrees) with the given headingCorrection; apply element-wise (e.g. via .apply) to correct a whole column
"""
if x + headingCorrection > 360:
return (x + headingCorrection) - 360
else:
return x + headingCorrection
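# A minimal usage sketch (kept as a comment so nothing runs at import time); the
# dataframe name "lidar_df" and the 5.0 degree correction are hypothetical examples,
# not values from this project:
#
#     lidar_df["dir_0_corr"] = lidar_df["dir_0"].apply(lambda x: correctHeading(x, 5.0))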
def processLIDARFile(lidarFile, keys, verbose=False, lidarLevels=(0, 10)):
"""
reads in a lidar file in csv format, parses out wind speed, wind direction
and return point altitude
returns a single dict with the datestring as key and the pandas dataframe as value
lidarFile: Path to a valid LIDAR file
keys: dictionary containing the columns from the lidarFile to be used
verbose: switch to activate detailed output
lidarLevels: range of lidar return levels to be processed
"""
if verbose: print("* reading in file...")
try:
rawData = pd.read_csv(lidarFile, low_memory=False)
except Exception as e:
print('*! failed to read in file, skipping {} -> {}'.format(lidarFile, e))
return pd.DataFrame()
if verbose: print("* done")
cleanData = pd.DataFrame()
if verbose: print("* iterating over lidar return levels:")
for i in range(lidarLevels[0], lidarLevels[1]+1):
if verbose: print("* lidar level {}".format(i))
# extract wind speed (ws), direction (dir) and the height of the lidar return point (h)
cleanData.insert(column="ws_{}".format(i),
value=rawData[keys["ws_{}".format(i)]].copy(),
loc=len(cleanData.columns)
)
cleanData.insert(column="dir_{}".format(i),
value=rawData[keys["dir_{}".format(i)]].copy(),
loc=len(cleanData.columns)
)
cleanData.insert(column="dir_{}_corr".format(i),
value=rawData[keys["dir_{}".format(i)]].copy(),
loc=len(cleanData.columns)
)
cleanData.insert(column="h_{}".format(i),
value=rawData[keys["h_{}".format(i)]].copy(),
loc=len(cleanData.columns)
)
if verbose: print("* adding heading")
cleanData.insert(column="heading",
value=rawData["Ships Gyro 1 Value"],
loc=len(cleanData.columns)
)
if verbose: print("* adding time/date")
cleanData.insert(column="time",
value=rawData.Time,
loc=0
)
cleanData.insert(column="date",
value=rawData.Date,
loc=0
)
dateString = "-".join(reversed(cleanData.date[2].split("/")))
return {dateString : cleanData}
def cleanLIDARData(data, verbose=False, timezone='Europe/Berlin'):
"""
takes a pandas dataframe as input and performs various numerical operations on it:
- dropping non-numeric data lines
- creating a time zone aware index
- setting the time zone to Europe/Berlin
- converting to numeric data
- cleaning of NaNs
"""
if verbose: print("* processing: {}".format('-'.join(reversed(data.date[2].split('/')))))
if verbose: print("* dropping non-parsable lines")
# mitigate weird error: in some files, the header appears randomly
data.drop(data.loc[data.date == "Date"].index, inplace=True)
if verbose: print("* creating new UTC time index")
# create a new date time index with the format YYYY-MM-DD HH:MM:SS
try:
data.insert(column="datetime",
value=pd.to_datetime(generateDateTime(data), utc=True),
loc=0
)
except Exception as e:
print('*! failed to generate datetime index, skipping')
return pd.DataFrame()
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails'
'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
import os
import numpy as np
import matplotlib.pyplot as pp
import pandas as pd
#########################
## INITIALISE VARIABLES ##
#########################
newDesk=[]
selectedList=[]
yPlotlabel=""
flow=["red", "orange","brown","tan", "lime", "purple", "teal", "black", "blue", "grey", "pink", "violet", "goldenrod","darkkhaki","peru", "saddlebrown"]
blues=["blue","turquoise","lime", "darkgreen","midnightblue", "slateblue", "dodgerblue", "mediumblue", "seagreen","yellowgreen","olivedrab","lightseagreen"]
greens=["olive","crimson","black", "blue", "maroon", "lightcoral", "chocolate", "lightsalmon", "darkolivegreen", "rosybrown"]
reds=flow+blues+greens+flow+blues+greens
BODStats=pd.DataFrame()
######################
## DEFINE FUNCTIONS ##
######################
def importData(directory):
os.chdir(directory)
folderList=os.listdir()
idvgData=pd.DataFrame() # Initialises a blank dataframe to be appended to
newDesk=[] # Initialise a blank list for the data to be selected from
counter=0
for folderName in folderList:# Loop over the functionalisation folders
os.chdir(directory)
folderList=os.listdir( )# Now list the FOLDERS inside the top directory
os.chdir(directory+"/"+folderName) # Change directory to the ith folderName
fileList=os.listdir() # List the FILES in the folderName FOLDER
for file in fileList:# Loop over the files in the fileList and import them to the dataframe with a new snazzier name
fName = directory+"/"+folderName+"/"+file
df=pd.read_csv(fName, usecols=[1,2], skiprows=248)
global device
newTitle,device = newNameFinal(folderName,file)
df.columns=pd.MultiIndex.from_product([[newTitle],df.columns]) # Introduce multiindex naming of columns
idvgData=pd.concat([idvgData,df],axis=1)
newDesk.append(newTitle)
global copied_original
copied_original=idvgData.copy()
copied_original.name=device
return copied_original,device,newDesk
def newNameFinal(folderName1, originalName):
# Takes a file name and shortens it based on the position of the "_" and then concatenates with the folder name.
displayText=originalName[0:originalName.find("_")]
outputName=folderName1+"_"+displayText
return outputName, displayText[0:2]
def importBOD(filename):
# Imports data from a .BOD file (a file which has been previously exported from SCRAMBLE)
BODdf=pd.read_csv(filename, header=[0,1])
global copied_original
copied_original=BODdf.copy()
# Produce a list of the data
niceCoffee=[]
for i, x in enumerate(BODdf.columns.get_level_values(0)):
if i%2>0: # Select every other name as they are repeated
niceCoffee.append(x)
return copied_original,niceCoffee
def statsTable(selection):
bigData=copied_original.copy() # Always work from a copy of the original data
statsInput=bigData.loc[:,(selection)] # Filter based on name of data
sVg = statsInput.loc[:,[statsInput.columns[0]]] # Select the Vbg
sDrain = statsInput.loc[:,[statsInput.columns[1]]] # Select the Ids
statsFrame=pd.DataFrame() #Initialise the dataframe for this loop
## FORWARD SWEEP STATS ##
#Slice the data and select the forward sweep
fVg=sVg.iloc[0:(int(statsInput.shape[0]/2))]
fDrain=sDrain.iloc[0:(int(statsInput.shape[0]/2))]
#DP Current - fDPI
fMinI=fDrain.describe().loc["min"]
statsFrame=pd.concat([statsFrame,fMinI],ignore_index=True)
#DP Voltage - fDPV
fMinVIndex=abs(fDrain-fMinI).idxmin()
fMinV1=fVg.iloc[fMinVIndex].values[0][0]
fMinV=pd.Series(fMinV1)
#!/usr/bin/env python
# coding: utf-8
# # Create datasets for G1
# ## Scaling and Feature Selection
# The original dataset and/or the balanced ones will first be split into separate files as training and test subsets using a **seed**. All the scaling and feature selection will be applied **only on the training set**:
# - *Dataset split*: train and test sets; the train set will be further divided into train and validation sets during the future machine-learning hyperparameter search for the best model with each ML method;
# - *Scaling* of the train set using centering, standardization, etc.;
# - *Reduction* of the train set dimension (after scaling): decrease the number of features by using fewer dimensions/derived features;
# - *Feature selection* using the train set (after scaling): decrease the number of features by keeping only the most important ones for the classification.
#
# Two CSV files will be created for each type of scaling, reduction or feature selection: *tr* - training and *ts* - test.
# In[ ]:
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import train_test_split # for dataset split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import PowerTransformer
# Let's define the name of the original dataset, the folder and the prefix characters for each scaling, dimension reduction or feature selection. Each transformation will add a prefix to the previous name of the file.
#
# **You can use the original dataset (which may be unbalanced) or the balanced datasets obtained with the previous scripts (one file only)!**
# In[ ]:
# Create scaled datasets using the normalized MA dataset
# Two CSV files will be created for each type of scaling, reduction or feature selection
WorkingFolder = './datasets/'
# change this to balanced datasets such as upsampl.ds_MA.csv or downsampl.ds_MA.csv
# if you want to run all the files, modify the script to loop over all the
# transformations using a list of input files [original, undersampled, upsampled] (see the sketch below)
sOrigDataSet = 'ds_MA.csv'
sOrigDataSet_G1 = 'ds.G1_MA.csv'
sOrigDataSet_G1_det = 'ds.G1_details.csv'
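# A minimal sketch of the loop mentioned above (kept as a comment): the balanced file
# names are the assumptions from the comment, and "split_scale_and_save" is a
# hypothetical helper wrapping the split/scale/save pipeline defined below.
#
# for sOrigDataSet in ['ds_MA.csv', 'upsampl.ds_MA.csv', 'downsampl.ds_MA.csv']:
#     df = pd.read_csv(os.path.join(WorkingFolder, sOrigDataSet))
#     split_scale_and_save(df, prefix=sOrigDataSet[:-4])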
# Split details
seed = 44 # for reproducibility
test_size = 0.25 # train size = 1 - test_size
outVar = 'Lij' # output variable
# Scalers: the files as prefix + original name
# =================================================
# Original (no scaling!), StandardScaler, MinMaxScaler, RobustScaler,
# QuantileTransformer (normal), QuantileTransformer(uniform)
# scaler prefix for file name
#scalerPrefix = ['o', 's', 'm', 'r', 'pyj', 'qn', 'qu']
# scalerPrefix = ['o', 's', 'm', 'r']
scalerPrefix = ['s']
# sklearn scalers
#scalerList = [None, StandardScaler(), MinMaxScaler(),
# RobustScaler(quantile_range=(25, 75)),
# PowerTransformer(method='yeo-johnson'),
# QuantileTransformer(output_distribution='normal'),
# QuantileTransformer(output_distribution='uniform')]
# sklearn scalers
# scalerList = [None, StandardScaler(), MinMaxScaler(), RobustScaler()]
scalerList = [StandardScaler()]
# Dimension Reductions
# ===================
# PCA
reductionPrefix = 'pca'
# Feature selection
# =================
# RF feature selection, Univariate feature selection using chi-squared test,
# Univariate feature selection with mutual information
# prefix to add to the processed files for each FS method
#FSprefix = ['fs.rf.',
# 'fs.univchi.',
# 'fs.univmi.']
FSprefix = ['fs-rf.']
# number of total features for reduction and selection if we are not limited by experiment
noSelFeatures = 50
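# The selectors behind the FS prefixes above are not constructed in this part of the
# script; a minimal sketch of what they could look like is given here (kept as a
# comment, with assumed -- not tuned -- settings). They would be fitted on the scaled
# training set only and then applied to the test set with .transform().
#
# from sklearn.feature_selection import SelectFromModel, SelectKBest, chi2, mutual_info_classif
# from sklearn.ensemble import RandomForestClassifier
#
# fs_rf = SelectFromModel(RandomForestClassifier(n_estimators=100, random_state=seed))
# fs_univchi = SelectKBest(chi2, k=noSelFeatures)   # chi2 needs non-negative inputs (e.g. MinMax scaling)
# fs_univmi = SelectKBest(mutual_info_classif, k=noSelFeatures)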
# Start by reading the original dataset:
# In[ ]:
print('-> Reading source dataset:',sOrigDataSet,'...')
df = pd.read_csv(os.path.join(WorkingFolder, sOrigDataSet))
print('Columns:',len(df.columns),'Rows:',len(df))
print('Done')
print('-> Reading source dataset G1:',sOrigDataSet_G1,'...')
df_G1 = pd.read_csv(os.path.join(WorkingFolder, sOrigDataSet_G1))
print('Columns:',len(df_G1.columns),'Rows:',len(df_G1))
print('Done')
print('-> Reading source dataset G1 details:',sOrigDataSet_G1_det,'...')
df_G1_det = pd.read_csv(os.path.join(WorkingFolder, sOrigDataSet_G1_det))
print('Columns:',len(df_G1_det.columns),'Rows:',len(df_G1_det))
print('Done')
# ## Dataset split
#
# First, split the dataset using stratification for unbalanced datasets: the ratio between the classes is kept the same in the training and test sets.
# In[ ]:
# Get features and output as dataframes
print('--> Split of dataset in training and test ...')
X = df.drop(outVar, axis = 1) # remove output variable from input features
y = df[outVar] # get only the output variable
# get only the values for features and output (as arrays)
Xdata = X.values # get values of features
Ydata = y.values # get output values
# split data in training and test sets (X = input features, y = output variable)
# using a seed, test size (defined above) and stratification for unbalanced classes
X_train, X_test, y_train, y_test = train_test_split(Xdata, Ydata,
test_size=test_size,
random_state=seed,
stratify=Ydata)
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('X_test:', X_test.shape)
print('y_test:', y_test.shape)
print('Done!')
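# In[ ]:

# Added sketch (not part of the original pipeline): with a stratified split the
# class proportions in y_train and y_test should match the full dataset almost
# exactly, so a quick sanity check is to print the ratios side by side.
for name, arr in [('full', Ydata), ('train', y_train), ('test', y_test)]:
    vals, counts = np.unique(arr, return_counts=True)
    print(name, dict(zip(vals, np.round(counts / counts.sum(), 3))))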
# In[ ]:
MAs = [col for col in df_G1.columns if ('MA-' in col)]
len(MAs)
# In[ ]:
# Get features and output as dataframes for G1
print('--> Split of dataset in training and test ...')
X_G1 = df_G1[MAs] # keep only the moving-average (MA) descriptor columns as input features
#X_G1 = df_G1.drop(outVar, axis = 1) # remove output variable from input features
y_G1 = df_G1[outVar] # get only the output variable
# get only the values for features and output (as arrays)
Xdata_G1 = X_G1.values # get values of features
Ydata_G1 = y_G1.values # get output values
print('Xdata_G1:', Xdata_G1.shape)
print('Ydata_G1:', Ydata_G1.shape)
# ## Dataset scaling
#
# Two files will be saved (training and test sets) for each scaling, including the non-scaled dataset.
# In[ ]:
# Scale dataset
print('-> Scaling dataset train and test:')
for scaler in scalerList: # or scalerPrefix
# new file name; we will add tr and ts + csv
newFile = scalerPrefix[scalerList.index(scaler)]+'.'+sOrigDataSet[:-4]
# decide to scale or not
if scaler == None: # if it is the original dataset, do not scale!
print('--> Original (no scaler!) ...')
X_train_transf = X_train # do not modify train set
X_test_transf = X_test # do not modify test set
else: # if it is not the original dataset, apply scalers
print('--> Scaler:', str(scaler), '...')
X_train_transf = scaler.fit_transform(X_train) # use a scaler to modify only train set
X_test_transf = scaler.transform(X_test) # use the same transformation for test set
X_G1_transf = scaler.transform(Xdata_G1) # use the same transformation for G1
# Save the training scaled dataset
df_tr_scaler = pd.DataFrame(X_train_transf, columns=X.columns)
df_tr_scaler[outVar]= y_train
newFile_tr = newFile +'_tr.csv'
print('---> Saving training:', newFile_tr, ' ...')
df_tr_scaler.to_csv(os.path.join(WorkingFolder, newFile_tr), index=False)
# Save the test scaled dataset
df_ts_scaler = pd.DataFrame(X_test_transf, columns=X.columns)
df_ts_scaler[outVar]= y_test
newFile_ts = newFile +'_ts.csv'
print('---> Saving test:', newFile_ts, ' ...')
df_ts_scaler.to_csv(os.path.join(WorkingFolder, newFile_ts), index=False)
# Save G1 scaled dataset for future predictions
df_G1_scaler = pd.DataFrame(X_G1_transf, columns=X.columns)
df_G1_scaler[outVar]= Ydata_G1
    newFile_G1 = newFile +'_G1.csv'
    print('---> Saving G1 scaled:', newFile_G1, ' ...')
    df_G1_scaler.to_csv(os.path.join(WorkingFolder, newFile_G1), index=False)
print('Done!')
# In[ ]:
# save scaler as file
import joblib  # sklearn.externals.joblib was removed in recent scikit-learn; use the standalone package
scaler_filename = os.path.join(WorkingFolder, "scaler.save")
joblib.dump(scaler, scaler_filename)
# In[ ]:
# means of the scaling
scaler.mean_
# In[ ]:
# variances of the scaling
scaler.var_
# In[ ]:
# scale (per-feature standard deviation) of the scaling
scaler.scale_
# Save the means, variances and scales of the StandardScaler to files (we need these values for the G1 prediction!):
# In[ ]:
np.savetxt(os.path.join(WorkingFolder, 'StandardScaler_mean.csv'), scaler.mean_.reshape((-1, 1)).T, delimiter=',')
np.savetxt(os.path.join(WorkingFolder, 'StandardScaler_var.csv'), scaler.var_.reshape((-1, 1)).T, delimiter=',')
np.savetxt(os.path.join(WorkingFolder, 'StandardScaler_scale.csv'), scaler.scale_.reshape((-1, 1)).T, delimiter=',')
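# In[ ]:

# Added sketch: the saved mean/scale files allow re-applying the standardization
# at G1 prediction time without unpickling the scaler object, since
# z = (x - mean) / scale. Assumes the feature columns are in the same order as
# during fitting.
mean_saved = np.loadtxt(os.path.join(WorkingFolder, 'StandardScaler_mean.csv'), delimiter=',')
scale_saved = np.loadtxt(os.path.join(WorkingFolder, 'StandardScaler_scale.csv'), delimiter=',')
X_G1_manual = (Xdata_G1 - mean_saved) / scale_saved
# should agree with scaler.transform(Xdata_G1) up to floating point error
print('max abs diff vs scaler.transform:', np.max(np.abs(X_G1_manual - scaler.transform(Xdata_G1))))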
# In[ ]:
# ### G1 scaling
# In[ ]:
import joblib  # standalone joblib instead of the removed sklearn.externals.joblib
scaler_filename = os.path.join(WorkingFolder, "scaler.save")
# load the scaler
scaler = joblib.load(scaler_filename)
# In[ ]:
WorkingFolder = './datasets/'
fG1_MAs = "ds.G1_MA.csv"
print('-> Reading source dataset G1:',fG1_MAs,'...')
df_G1 = pd.read_csv(os.path.join(WorkingFolder, fG1_MAs))
print('Columns:',len(df_G1.columns),'Rows:',len(df_G1))
print('Done')
# In[ ]:
X_G1 = df_G1.drop(outVar, axis = 1) # remove output variable from input features
y_G1 = df_G1['Lij'] # get only the output variable
# get only the values for features and output (as arrays)
Xdata_G1 = X_G1.values # get values of features
Ydata_G1 = y_G1.values # get output values
print('Xdata_G1:', Xdata_G1.shape)
print('Ydata_G1:', Ydata_G1.shape)
# In[ ]:
X_G1_transf = scaler.transform(Xdata_G1) # use the same transformation for G1
# In[ ]:
# Save G1 scaled dataset for future predictions
df_G1_scaler = pd.DataFrame(X_G1_transf, columns=X_G1.columns)
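# Presumably the scaled G1 frame is then completed and written out as in the
# earlier scaling cell (sketch mirroring that pattern; the file name is an assumption):
df_G1_scaler[outVar] = Ydata_G1
df_G1_scaler.to_csv(os.path.join(WorkingFolder, 's.ds_MA_G1.csv'), index=False)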
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 6 21:25:44 2021
@author: laukkara
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.cluster.hierarchy as sch
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import KMeans
from sklearn.cluster import SpectralClustering
from sklearn.cluster import DBSCAN
import scipy.stats as ss
replacements = {'USP': 'PRP',
'UST': 'PRT',
'BSWE': 'BSW',
'PICB': 'EHR',
'TRC': 'BSR',
'SW': 'PVP',
'USH': 'USH',
'YP1': 'YP',
'YP3': 'YP'}
def get_new_names(old_names):
new_names = []
for old_name in old_names:
items_list = old_name.split('_')
first = items_list[0]
if 'north' in first:
first = first[0:-5]
elif 'south' in first:
first = first[0:-5]
else:
print(first)
str_dummy = '{}, {}, {}'.format(replacements[first],
int(items_list[-2].replace('df','')) + 1,
items_list[-1])
new_names.append(str_dummy)
return(new_names)
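# Illustrative example of the renaming (the measurement point names below are
# hypothetical, added only as a sketch of the mapping):
def _demo_get_new_names():
    old = ['USPnorth_wall_df0_T', 'YP1_roof_df2_RH']
    # 'USPnorth...df0...T' -> 'PRP, 1, T' ; 'YP1...df2...RH' -> 'YP, 3, RH'
    print(get_new_names(old))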
#############################################
def create_X(data, case_filters, y_yes_filters, y_not_filters):
# This function filters specific case+mp pairs
list_values = []
list_names = []
for idx_case, case in enumerate(data):
# loop through cases
# Check that location, climate and year columns are identical
if idx_case == 0:
ids1 = data[case].loc[:, ['location', 'climate', 'year']].copy()
else:
ids2 = data[case].loc[:, ['location', 'climate', 'year']].copy()
if not ids1.equals(ids2):
print('NOT EQUAL:', case)
for idx_column, column in enumerate(data[case].columns):
# loop through columns
cond_case_names = all(x in case for x in case_filters)
cond_yes_column_names = all(x in column for x in y_yes_filters)
cond_not_column_names = all(x not in column for x in y_not_filters)
cond_all = cond_case_names and cond_yes_column_names and cond_not_column_names
if cond_all:
column_str = '{}__{}'.format(case, column)
list_values.append(data[case].loc[:, column])
list_names.append(column_str)
df_X = pd.concat(list_values, axis=1, keys=list_names)
df_X = pd.concat([df_X, ids1], axis=1)
print(' df_X.shape', df_X.shape)
return(df_X)
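# Illustrative call (the filter strings below are hypothetical, added as a sketch):
# keep the south-facing USP cases and only columns whose name contains 'M_',
# excluding any 'rank' columns.
def _demo_create_X(data):
    return create_X(data,
                    case_filters=['USP', 'south'],
                    y_yes_filters=['M_'],
                    y_not_filters=['rank'])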
############################################
def generate_30year_xlsx(list_df, output_dir, filename_str):
# list_df = [df0, df1] from clustering algorithm
climates_for_median = ['1989-2018', 'RCP45-2050','RCP45-2080','RCP85-2050','RCP85-2080']
## Excel files
for idx_df, df in enumerate(list_df):
idxs_col = ['location', 'climate', 'year']
ids = df[idxs_col].copy()
df = df.drop(columns=idxs_col).copy()
#print('list_df, df.shape', df.shape) # df.shape == (1200, 2...166) when the case filter is USP
locations = ids['location'].unique()
xlsx_file_path = os.path.join(output_dir,
'{}_df{}_n{}_yearClimateMedians.xlsx' \
.format(filename_str, idx_df, df.shape[1]))
writer = pd.ExcelWriter(xlsx_file_path, engine='xlsxwriter')
# coding: utf-8
"""tools for analyzing VPs in an individual precipitation event"""
from collections import OrderedDict
from os import path
from datetime import timedelta
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.io import loadmat
from radcomp.vertical import (filtering, classification, plotting, insitu, ml,
deriv, NAN_REPLACEMENT)
from radcomp import arm, azs
from radcomp.tools import strftime_date_range, cloudnet
from j24 import home, daterange2str
USE_LEGACY_DATA = False
if USE_LEGACY_DATA:
DATA_DIR = path.join(home(), 'DATA', 'vprhi')
DATA_FILE_FMT = '%Y%m%d_IKA_VP_from_RHI.mat'
else:
DATA_DIR = path.join(home(), 'DATA', 'vprhi2')
DATA_FILE_FMT = '%Y%m%d_IKA_vprhi.mat'
DEFAULT_PARAMS = ['zh', 'zdr', 'kdp']
def case_id_fmt(t_start, t_end=None, dtformat='{year}{month}{day}{hour}',
day_fmt='%d', month_fmt='%m', year_fmt='%y', hour_fmt='T%H'):
"""daterange2str wrapper for date range based IDs"""
return daterange2str(t_start, t_end, dtformat=dtformat, hour_fmt=hour_fmt,
day_fmt=day_fmt, month_fmt=month_fmt,
year_fmt=year_fmt)
def date_us_fmt(t_start, t_end, dtformat='{day} {month} {year}', day_fmt='%d',
month_fmt='%b', year_fmt='%Y'):
"""daterange2str wrapper for US human readable date range format"""
return daterange2str(t_start, t_end, dtformat=dtformat, day_fmt=day_fmt,
month_fmt=month_fmt, year_fmt=year_fmt)
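# Illustrative usage (the dates are hypothetical; the exact output string depends
# on j24.daterange2str):
def _demo_case_id_fmt():
    t0 = pd.Timestamp('2014-02-21 12:00')
    t1 = pd.Timestamp('2014-02-23 00:00')
    print(case_id_fmt(t0, t1))   # compact ID, e.g. '140221T12-140223T00'
    print(date_us_fmt(t0, t1))   # human-readable, e.g. '21 Feb 2014 ... 23 Feb 2014'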
def vprhimat2pn(datapath):
"""Read vertical profile mat files to Panel."""
# TODO: pd.Panel is deprecated/removed in modern pandas; migrate to xarray.Dataset or a MultiIndex DataFrame
try:
data = loadmat(datapath)['VP_RHI']
except FileNotFoundError as e:
print('{}. Skipping.'.format(e))
return pd.Panel()
fields = list(data.dtype.fields)
fields.remove('ObsTime')
fields.remove('height')
str2dt = lambda tstr: pd.datetime.strptime(tstr, '%Y-%m-%dT%H:%M:%S')
t = list(map(str2dt, data['ObsTime'][0][0]))
h = data['height'][0][0][0]
data_dict = {}
for field in fields:
data_dict[field] = data[field][0][0].T
try:
return pd.Panel(data_dict, major_axis=h, minor_axis=t)
# sometimes t does not have all values
except ValueError as e:
if data_dict['ZH'].shape[1] == 96:
# manoeuvre to set correct timestamps when data is missing
t1 = t[0] + timedelta(hours=23, minutes=45)
midnight = t1.replace(hour=0, minute=0)
if midnight <= t[0]:
midnight += timedelta(hours=24)
dt = t1-midnight
dt_extra = timedelta(minutes=15-(dt.total_seconds()/60)%15)
dt = dt + dt_extra
t = pd.date_range(t[0]-dt, t1-dt, freq='15min')
print('ObsTime missing values! Replacing with generated timestamps.')
return pd.Panel(data_dict, major_axis=h, minor_axis=t)
from __future__ import print_function
from __future__ import division
# load libraries
from builtins import str
from builtins import range
from past.utils import old_div
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import geopandas as gpd
import sys
import os
from matplotlib.collections import PatchCollection
from descartes import PolygonPatch
import shapely
import geopy
import argparse
import io
import unicodedata
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
import glob
import pickle
from os.path import expanduser
import pdb
import unidecode
import importlib
importlib.reload(sys)
#sys.setdefaultencoding('utf-8')
#####################################################
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
################################################
def string_2_bool(string):
if string in ['true', 'TRUE' , 'True' , '1', 't', 'y', 'yes', 'yeah', 'yup', 'certainly', 'uh-huh']:
return True
else:
return False
################################################
def saveplot(appellations_domain,vin,metropole,bassins):
#to set extent
appellations_domain = gpd.GeoDataFrame(pd.DataFrame(appellations_domain), crs=appellations.crs)
if len(appellations_domain)>0:
xmin,ymin, xmax,ymax = appellations_domain.total_bounds
elif len(bassins.loc[bassins.nom == vin.Bassin]) == 0:
xmin,ymin,xmax,ymax = 0, 0, 0, 0
else:
xmin,ymin,xmax,ymax = bassins.loc[bassins.nom == vin.Bassin].total_bounds
#vin.geometry.coords.xy[0][0]-1.e5, vin.geometry.coords.xy[0][0]+1.e5,\
#vin.geometry.coords.xy[1][0]-1.e5, vin.geometry.coords.xy[1][0]+1.e5
if vin.Bassin == 'Bourgogne': buffer_lim = 100.e3
elif u'domainedelamordor' in vin.DomaineChateau.replace('&','').replace(' ','').lower() : buffer_lim = 50.e3
else: buffer_lim = 200.e3
'''
for side in ax.spines.keys(): # 'top', 'bottom', 'left', 'right'
ax.spines[side].set_linewidth(1)
for side in bx.spines.keys(): # 'top', 'bottom', 'left', 'right'
bx.spines[side].set_linewidth(1)
'''
xx = xmax-xmin; yy = ymax-ymin
xc = xmin+.5*xx; yc = ymin+.5*yy
dd = max([xx,yy,buffer_lim])
xmin = xc-.5*dd; xmax = xc+.5*dd
ymin = yc-.5*dd; ymax = yc+.5*dd
bufferZone_map = 10.e3
ax.set_xlim(xmin-bufferZone_map, xmax+bufferZone_map)
ax.set_ylim(ymin-bufferZone_map, ymax+bufferZone_map)
#ax.legend(handles = LegendElement_domain, loc='upper right', labelspacing=1.1, handlelength=2.5, handleheight=1.9)
ax.legend(handles = LegendElement_domain, labelspacing=.7, handlelength=2.5, handleheight=1.9, ncol=int(vin['legend_nbreColumn']), loc=vin['legend_loc'],
fancybox=True, framealpha=0.9 )
# for multiple columns:
# probably need to add a column in the excel file to tell where to put the legend and the number of columns
#https://stackoverflow.com/questions/42103144/how-to-align-rows-in-matplotlib-legend-with-2-columns
# the frame of the left plot is not good; try better framing
# chateauAmpuis is doing weird stuff on the legend
# find a solution for the alsace appellations
# add igp
if vin.Pays == 'France':
#add prefecture
if vin.DomaineChateau == u'Ch\xe2teau La Borie':
selectedCommune = ['Nice', 'Marseille', 'Montpellier', 'Avignon', 'Gap', 'Saint-\xc3\x89tienne', 'Valence', 'Bastia', 'Ajaccio']
else:
selectedCommune = prefectures.Commune.to_list()
prefectures.loc[prefectures.Commune.isin(selectedCommune)].plot(ax=ax, color='k', markersize=20,)
prefectures.loc[prefectures.Commune.isin(selectedCommune)].apply(lambda x: ax.annotate(text=str(x.Commune),\
xy=[x.geometry.centroid.x + x.add_to_name_position.coords.xy[0][0],\
x.geometry.centroid.y + x.add_to_name_position.coords.xy[1][0] ], ha=x.LabelLoc_ha,va=x.LabelLoc_va,zorder=5),axis=1);
try:
minx, miny, maxx, maxy = metropole.geometry.total_bounds
xx = maxx-minx; yy = maxy-miny
xc = minx+.5*xx; yc = miny+.5*yy
dd = max([xx,yy])
minx = xc-.5*dd; maxx = xc+.5*dd
miny = yc-.5*dd; maxy = yc+.5*dd
except:
minx = 0; maxx = 0
miny = 0; maxy = 0
buffer_lim = 50.e3
bx.set_xlim(minx-buffer_lim,maxx+buffer_lim)
bx.set_ylim(miny-buffer_lim,maxy+buffer_lim)
# add image location and France map
rect = mpatches.Rectangle((xmin-bufferZone_map,ymin-bufferZone_map),(xmax-xmin)+2*bufferZone_map,\
(ymax-ymin)+2*bufferZone_map, linewidth=1, edgecolor='k', facecolor='none')
bx.add_patch(rect)
fig.savefig(map_domain, dpi=dpi_map, facecolor=fig.get_facecolor())
plt.close(fig)
def simple_appelation(appelation):
if appelation == 'Alsace Gewurztraminer': return 'Alsace'
if appelation == 'Alsace Riesling' : return 'Alsace'
if appelation == 'Alsace Pinot Noir' : return 'Alsace'
return appelation
##########################
if __name__ == '__main__':
##########################
parser = argparse.ArgumentParser(description='draw wine map')
parser.add_argument('-s','--flag_start', help='False if input files needed to be reloaded',required=False)
parser.add_argument('-v','--flag_vin', help='True if only wine list need to be reloaded',required=False)
parser.add_argument('-b','--flag_border', help='True if reload border shapefile',required=False)
args = parser.parse_args()
home = expanduser("~")
# where are the input data
dir_in = home+'/Dropbox/CarteVin/'
# where to generate vin.tex
dir_out = './'
wkdir = dir_out + 'VinData/'
ensure_dir(wkdir)
#listDesVins
file_listDesVins = home+'/Dropbox/CarteVin/MaCave/ListeDesVins.xlsx'
dpi_largePlot = 100
dpi_map = 100
#define Input
if args.flag_start is None:
flag_restart = True
else:
flag_restart = string_2_bool(args.flag_start)
if args.flag_vin is None:
flag_vin = False
else:
flag_vin = string_2_bool(args.flag_vin)
if args.flag_border is None:
flag_border = False
else:
flag_border = string_2_bool(args.flag_border)
######################
# france border contour
######################
if ((flag_border) or (not(os.path.isfile(wkdir+"metropole.shp")))):
print('france border contour ...')
fp = dir_in+'communes-20150101-5m-shp/communes-20150101-5m.shp'
map_dfCommune = gpd.read_file(fp)
map_dfCommune = map_dfCommune.rename(columns={'nom':'Commune'})
map_dfCommune['metropole'] = 0
map_dfCommune.loc[ pd.to_numeric(map_dfCommune['insee'], errors='coerce') < 97000, 'metropole'] = 1
map_dfCommune.loc[map_dfCommune['insee'].str.contains('2A'), 'metropole'] = 1
map_dfCommune.loc[map_dfCommune['insee'].str.contains('2B'), 'metropole'] = 1
map_dfCommune.to_file(wkdir+"map_df_communes.shp")
metropole_geometry = map_dfCommune[['metropole','geometry']].dissolve(by='metropole')
metropole = gpd.GeoDataFrame(pd.DataFrame({'countryMainLand': ['France,']}),geometry=[metropole_geometry.geometry[1]],crs=metropole_geometry.crs)
metropole = metropole.to_crs(epsg=3395)
metropole.to_file(wkdir+"metropole.shp")
else:
metropole = gpd.read_file(wkdir+"metropole.shp")
map_dfCommune = gpd.read_file(wkdir+"map_df_communes.shp")
######################
#load appellations
######################
listAppellations = pd.read_csv(dir_in+'liste-AOC-vins-wikipedia.csv')
listAppellations['Appellation'] = listAppellations['Appellation'].str.strip()
listAppellations['Bassin'] = listAppellations['Bassin'].str.strip()
######################
#load bassins color code
######################
listBassinColor = pd.read_csv(dir_in+'bassins-colors.csv')
listBassinColor['Bassin'] = [str(xx) for xx in listBassinColor['Bassin']]
######################
#load wine list
######################
if ((flag_vin) or (not(flag_restart)) or (not(os.path.isfile(wkdir+"listVins.gpkg")))):
print('list of wines from the cellar ...')
print('the file is here: ', file_listDesVins)
vins_ = pd.read_excel(file_listDesVins, sheet_name='france - Liste des vins', header=2)
vins_ = vins_.loc[ (vins_['Couleur'].str.strip()=='Blanc') |
(vins_['Couleur'].str.strip()==u'Blanc p\xe9tillant') |
(vins_['Couleur'].str.strip()=='Rouge') |
(vins_['Couleur'].str.strip()==u'Ros\xe9') |
(vins_['Couleur'].str.strip()==u'Pommeau')
]
vins_2_ = pd.read_excel(file_listDesVins, sheet_name='international - Liste des vins ', header=1)
vins_2_ = vins_2_.loc[ (vins_2_['Couleur'].str.strip()=='Blanc')     |
                   (vins_2_['Couleur'].str.strip()==u'Blanc p\xe9tillant') |
                   (vins_2_['Couleur'].str.strip()=='Rouge')     |
                   (vins_2_['Couleur'].str.strip()==u'Ros\xe9')       |
                   (vins_2_['Couleur'].str.strip()==u'Pommeau')
                 ]
cidres_ = pd.read_excel(file_listDesVins, sheet_name='cidre - Liste des Cidres', header=1)
cidres_.index = list(range(len(vins_),len(vins_)+len(cidres_)))
listVins = pd.concat([ vins_, cidres_, vins_2_ ], sort=True,
ignore_index=True)
#listVins = pd.concat([ vins_2_ ], sort=True)
#clean data
listVins = listVins.loc[ (listVins['Couleur'].str.strip()=='Blanc') |
(listVins['Couleur'].str.strip()==u'Blanc p\xe9tillant') |
(listVins['Couleur'].str.strip()=='Rouge') |
(listVins['Couleur'].str.strip()==u'Ros\xe9') |
(listVins['Couleur'].str.strip()==u'Cidre') |
(listVins['Couleur'].str.strip()==u'Pommeau')
]
geocoder = geopy.geocoders.BANFrance()
geocoder_bing = None
cave = geocoder.geocode('4 rue Coat Tanguy 29890 Brignogan-Plages')
listVins['latlong'] = [cave.point]*listVins.shape[0]
for index, row in listVins.iterrows():
address1 = row['Adresse'].split(' ')
address2 = []
for address_ in address1:
tmp_ = address_.rstrip(',').rstrip(' ')
if tmp_ != '':
address2.append(tmp_)
address3 = '{:s} {:05.0f} {:s} {:s}'.format( ' '.join(address2[:-1]), row['Code postal'], address2[-1], row['Pays'])
try:
if row['Pays'] == 'France':
listVins.at[index,'latlong'] = geocoder.geocode(address3,timeout=3).point
else:
if geocoder_bing == None:
key_bing = '<KEY>'
geocoder_bing = geopy.geocoders.Bing(key_bing)
listVins.at[index,'latlong'] = geocoder_bing.geocode(address3,timeout=3).point
except geopy.exc.GeocoderTimedOut :
print('geopy timeout on :', address3)
sys.exit()
#print address3, ' | ', listVins.at[index,'latlong']
lats = [pt.latitude for pt in listVins['latlong']]
lons = [pt.longitude for pt in listVins['latlong']]
listVins = gpd.GeoDataFrame( listVins.loc[:,listVins.columns!='latlong'] ,geometry= gpd.points_from_xy(x=lons,y=lats), crs={'init': 'epsg:4326'})
#listVins = gpd.GeoDataFrame( listVins.loc[:,:] ,geometry= gpd.points_from_xy(x=lons,y=lats), crs={'init': 'epsg:4326'})
listVins = listVins.to_crs(epsg=3395)
listVins['DomaineChateau'] = [ str(xx) for xx in listVins['DomaineChateau'] ]
listVins['Pays_order'] = listVins['Pays'].str.replace('France','AAA')
#load local legend info
legendParam = pd.read_csv(dir_in+'domaineChateau_legend_location.csv')
legendParam['DomaineChateau'] = [ str(xx) for xx in legendParam['DomaineChateau'] ]
listVins = pd.merge(listVins, legendParam, how='left', on='DomaineChateau')
listVins.loc[listVins['legend_nbreColumn'].isnull(),'legend_nbreColumn'] = np.int64(1)
listVins.loc[listVins['legend_loc'].isnull(),'legend_loc'] = 'upper right'
print('{:d} wines have been loaded'.format(listVins.shape[0]))
listVins.to_file(wkdir+"listVins.gpkg", driver="GPKG")
else:
listVins = gpd.read_file(wkdir+"listVins.gpkg", driver="GPKG")
####################
# neighbourg borders
####################
#data from https://wambachers-osm.website/boundaries/
coasts_borders = gpd.read_file(dir_in+'Borders/neighbourgCountries.shp')
coasts_borders = coasts_borders.to_crs(epsg=3395)
####################
# river and lake
####################
clc12 = gpd.read_file(dir_in+'CLC12/CLC12_FR_RGF_SHP/CLC12_FR_RGF.shp')
inlandWater = clc12.loc[\
#(clc12['CODE_12']=='511')|(clc12['CODE_12']=='522')\
#|(clc12['CODE_12']=='411')\
(clc12['CODE_12']=='521')]
inlandWater_river = gpd.read_file(dir_in+'Wise_WaterData/EuropeanRiver.shp') # for large plot
inlandWater_lake = clc12.loc[(clc12['CODE_12']=='512')]
inlandWater_lake = inlandWater_lake.loc[inlandWater_lake.geometry.area>.5e6]
seaWater = clc12.loc[(clc12['CODE_12']=='423')|(clc12['CODE_12']=='523')\
|(clc12['CODE_12']=='421')|(clc12['CODE_12']=='331')]
inlandWater = inlandWater.to_crs(epsg=3395)
seaWater = seaWater.to_crs(epsg=3395)
inlandWater_river = inlandWater_river.to_crs(epsg=3395)
rivers_hydroFrance = gpd.read_file(dir_in+'ROUTE120_1-1_SHP_LAMB93_000_2012-11-26/ROUTE120/1_DONNEES_LIVRAISON_2012-11-00377/R120_1-1_SHP_LAMB93_FR-ED121/HYDROGRAPHIE/TRONCON_HYDROGRAPHIQUE.SHP')
rivers_hydroFrance = rivers_hydroFrance.to_crs(epsg=3395)
country_pakage = {}
paysCode = {};
paysCode['Hongrie'] = 'HUN'
paysCode['France'] = 'FRA'
pays = 'France'
country_pakage[pays] = {}
country_pakage[pays]['inlandWater'] = inlandWater
country_pakage[pays]['inlandWater_river'] = rivers_hydroFrance
country_pakage[pays]['inlandWater_lake'] = inlandWater_lake
country_pakage[pays]['seaWater'] = seaWater
country_pakage[pays]['coasts_borders'] = coasts_borders
country_pakage[pays]['metropole'] = metropole
######################
# border contour international
######################
for pays in listVins['Pays']:
if pays == 'France': continue
country_pakage[pays] = {}
if pays in [u'Nouvelle Z\xe9lande ', u'Italie']:
empty_ = gpd.GeoDataFrame([], crs="EPSG:4326")
country_pakage[pays]['inlandWater'] = empty_
country_pakage[pays]['inlandWater_river'] = empty_
country_pakage[pays]['inlandWater_lake'] = empty_
country_pakage[pays]['seaWater'] = empty_
country_pakage[pays]['coasts_borders'] = empty_
country_pakage[pays]['metropole'] = empty_
continue
clc12_ = gpd.read_file(dir_in+'CLC12/CLC12_{:s}/CLC12_{:s}.shp'.format(paysCode[pays],paysCode[pays]))
inlandWater_ = clc12_.loc[(clc12_['Code_12']=='521')]
inlandWater_river_ = gpd.read_file(dir_in+'river/{:s}/{:s}_water_lines_dcw.shp'.format(paysCode[pays],paysCode[pays]))
inlandWater_lake_ = gpd.read_file(dir_in+'river/{:s}/{:s}_water_areas_dcw.shp'.format(paysCode[pays],paysCode[pays]))
#inlandWater_lake_ = clc12_.loc[(clc12_['Code_12']=='512')]
#inlandWater_lake_ = inlandWater_lake_.loc[inlandWater_lake_.geometry.area>.5e6]
seaWater_ = clc12_.loc[(clc12_['Code_12']=='423')|(clc12_['Code_12']=='523')\
|(clc12_['Code_12']=='421')|(clc12_['Code_12']=='331')]
inlandWater_ = inlandWater_.to_crs(epsg=3395)
seaWater_= seaWater_.to_crs(epsg=3395)
inlandWater_river_ = inlandWater_river_.to_crs(epsg=3395)
inlandWater_lake_ = inlandWater_lake_.to_crs(epsg=3395)
coasts_borders_ = gpd.read_file(dir_in+'Borders/International/{:s}/neighbourgCountries.geojson'.format(pays))
coasts_borders_ = coasts_borders_.to_crs(epsg=3395)
metropole_ = gpd.read_file(dir_in+'Borders/International/{:s}/metropole.geojson'.format(pays))
metropole_ = metropole_.to_crs(epsg=3395)
country_pakage[pays]['inlandWater'] = inlandWater_
country_pakage[pays]['inlandWater_river'] = inlandWater_river_
country_pakage[pays]['inlandWater_lake'] = inlandWater_lake_
country_pakage[pays]['seaWater'] = seaWater_
country_pakage[pays]['coasts_borders'] = coasts_borders_
country_pakage[pays]['metropole'] = metropole_
######################
#load insee / postal code
######################
#insee_cp_commune = pd.read_csv(dir_in+'laposte_hexasmal.csv')
#insee_cp_commune = insee_cp_commune.rename(columns={'Code_postal':'CI'})
######################
#load prefectures
######################
prefectures = pd.read_csv(dir_in+'hotels-de-prefectures-fr.csv')
prefectures = gpd.GeoDataFrame( prefectures.loc[:,(prefectures.columns!='LonDD')&(prefectures.columns!='LatDD')] ,
geometry= gpd.points_from_xy(x=prefectures['LonDD'],y=prefectures['LatDD']), crs={'init': 'epsg:4326'})
prefectures = prefectures.to_crs(epsg=3395)
prefectures['add_to_name_position'] = shapely.geometry.Point(0,0)
######################
# Merge geo commune info and appellation
######################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"map_df_communes_appellation_bassin.shp")))):
#load appelation par communes
allAppellation_per_communes = pd.read_csv(dir_in+'2020-02-26-comagri-communes-aires-ao_ronan.csv')
allAppellation_per_communes.rename(columns=lambda x: x.replace('Aire geographique','Appellation'), inplace=True)
allAppellation_per_communes.Appellation = allAppellation_per_communes.Appellation.str.lower().str.replace(' ','-')
allAppellation_per_communes = allAppellation_per_communes.rename(columns={'CI':'insee'})
print('merge commune and appellations ...')
#join df
appellation_bassin_per_communes = allAppellation_per_communes.set_index('Appellation').join(listAppellations.set_index('Appellation'))
#deal with Alsace
appellation_bassin_per_communes.loc['alsace':'alsacf','Bassin'] = 'Alsace'
#fiefs vendees
appellation_bassin_per_communes.loc[['fiefs-vend\xc3\xa9ens' in s for s in appellation_bassin_per_communes.index],'Bassin'] = 'Vall\xc3\xa9e de la Loire'
#gaillac
appellation_bassin_per_communes.loc[['gaillac' in s for s in appellation_bassin_per_communes.index],'Bassin'] = 'Sud-Ouest'
#saumur
appellation_bassin_per_communes.loc[['saumur' in s for s in appellation_bassin_per_communes.index],'Bassin'] = 'Vall\xc3\xa9e de la Loire'
#vosne-romanee
appellation_bassin_per_communes.loc[['vosne-roman\xc3\xa9e' in s for s in appellation_bassin_per_communes.index],'Bassin'] = 'Bourgogne'
#vougeot
appellation_bassin_per_communes.loc[['vougeot' in s for s in appellation_bassin_per_communes.index],'Bassin'] = 'Bourgogne'
appellation_bassin_per_communes = appellation_bassin_per_communes.loc[appellation_bassin_per_communes['Bassin'].notna()]
appellation_bassin_per_communes = appellation_bassin_per_communes.reset_index()
#convert CI to insee format
idx_notCorsica = appellation_bassin_per_communes.loc[pd.to_numeric(appellation_bassin_per_communes['insee'], errors='coerce').notna()].index
tmp_ = pd.to_numeric(appellation_bassin_per_communes.loc[idx_notCorsica,'insee'], errors='coerce').map('{:05g}'.format)
appellation_bassin_per_communes.loc[idx_notCorsica,'insee'] = tmp_
map_df = map_dfCommune.merge(appellation_bassin_per_communes, on='insee')
map_df.Bassin = [ str(xx) for xx in map_df.Bassin]
map_df.to_file(wkdir+"map_df_communes_appellation_bassin.shp")
else:
map_df = gpd.read_file(wkdir+"map_df_communes_appellation_bassin.shp")
######################
# bassins shapefile
######################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"bassins.shp")))):
print('bassins ...')
bassins = map_df[['Bassin','geometry']].dissolve(by='Bassin').reset_index()
bassins = bassins.merge(listBassinColor,on='Bassin')
#sort by area to help plotting
bassins['area']=bassins.geometry.area
bassins = bassins.sort_values(by='area', ascending=False)
bassins = bassins.to_crs(epsg=3395)
bassins = bassins.rename(columns={'Bassin':'nom'})
bassins.to_file(wkdir+"bassins.shp")
else:
bassins = gpd.read_file(wkdir+"bassins.shp")
bassins = bassins.sort_values(by='area', ascending=False)
country_pakage['France']['bassins'] = bassins
######################
# appellations shapefile
######################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"appellations.shp")))):
print('appellations ...')
appellations = map_df[['Appellation','geometry','Bassin']].dissolve(by='Appellation').reset_index()
appellations = appellations.rename(columns={'Bassin':'bassin'})
#sort by area to help plotting
appellations['area']=appellations.geometry.area
appellations = appellations.sort_values(by='area', ascending=False)
appellations = appellations.rename(columns={'Appellation':'nom'})
appellations = appellations.to_crs(epsg=3395)
appellations.to_file(wkdir+"appellations.shp")
else:
appellations = gpd.read_file(wkdir+"appellations.shp")
appellations = appellations.sort_values(by='area', ascending=False)
########################################
#Add IGP stored locally in appellations:
########################################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"appellations_igp.shp")))):
dir_igp = dir_in+'IGP/'
appellations_igp = []
for csvFile in glob.glob(dir_igp+'*.csv'):
igp_ = pd.read_csv(csvFile)
igp_.rename(columns=lambda x: x.replace('Aire geographique','Appellation'), inplace=True)
igp_.Appellation = igp_.Appellation.str.lower().str.replace(' ','-')
igp_ = igp_.rename(columns={'CI':'insee'})
igp_.insee = [str(xx) for xx in igp_.insee]
igp_.Bassin = [ str(xx) for xx in igp_.Bassin]
map_df_ = map_dfCommune.merge(igp_, on='insee')
map_df_.Bassin = [ str(xx) for xx in map_df_.Bassin]
igp_gpd = map_df_[['Appellation','geometry','Bassin']].dissolve(by='Appellation').reset_index()
igp_gpd['area']=igp_gpd.geometry.area
igp_gpd = igp_gpd.to_crs(epsg=3395)
igp_gpd = igp_gpd.rename(columns={'Appellation':'nom'})
igp_gpd = igp_gpd.rename(columns={'Bassin':'bassin'})
appellations_igp.append(igp_gpd)
appellations_igp = pd.concat(appellations_igp, ignore_index=True)
appellations_igp.to_file(wkdir+"appellations_igp.shp")
else:
appellations_igp = gpd.read_file(wkdir+"appellations_igp.shp")
appellations_igp = appellations_igp.sort_values(by='area', ascending=False)
########################################
#replace geographic zone for alsace grand cru to production zone stored locally in CarteVin/GrandCruAslace:
########################################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"appellations_AlsaceGrandCru.shp")))):
dir_agc = dir_in+'GrandCruAlsace/'
appellations_agc = []
for csvFile in glob.glob(dir_agc+'*.csv'):
try:
agc_ = pd.read_csv(csvFile, sep=';', encoding = "ISO-8859-1")
except:
pdb.set_trace()
agc_.rename(columns=lambda x: x.replace('Aire g\xe9ographique','Appellation'), inplace=True)
agc_.Appellation = agc_.Appellation.str.lower().str.replace(' ','-')
agc_ = agc_.rename(columns={'CI':'insee'})
agc_.insee = [str(xx) for xx in agc_.insee]
agc_['Bassin'] = ['Alsace']*len(agc_)
agc_ = agc_.loc[agc_['Zone'] == 'Zone de production des raisins']
map_df_ = map_dfCommune.merge(agc_, on='insee')
map_df_.Bassin = [ str(xx) for xx in map_df_.Bassin]
agc_gpd = map_df_[['Appellation','geometry','Bassin']].dissolve(by='Appellation').reset_index()
agc_gpd['area']=agc_gpd.geometry.area
agc_gpd = agc_gpd.to_crs(epsg=3395)
agc_gpd = agc_gpd.rename(columns={'Appellation':'nom'})
agc_gpd = agc_gpd.rename(columns={'Bassin':'bassin'})
appellations_agc.append(agc_gpd)
appellations_agc = pd.concat(appellations_agc, ignore_index=True)
appellations_agc.to_file(wkdir+"appellations_AlsaceGrandCru.shp")
else:
appellations_agc = gpd.read_file(wkdir+"appellations_AlsaceGrandCru.shp")
appellations_agc = appellations_agc.sort_values(by='area', ascending=False)
########################################
#Add particular zone stored locally in appellations: (ex: cidre fouesnantais)
########################################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"appellations_other.shp")))):
dir_other = dir_in+'AutreAppellation/'
appellations_other = []
for csvFile in glob.glob(dir_other+'*.csv'):
other_ = pd.read_csv(csvFile)
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import geopandas as gpd
from geopandas import GeoDataFrame
from shapely.geometry import Point, Polygon
species = pd.read_csv('C:/Users/Kaytie/Desktop/School/Regis University/Summer 2021/MSDS 696/species.csv', low_memory=False)
species.head()
parks = pd.read_csv('C:/Users/Kaytie/Desktop/School/Regis University/Summer 2021/MSDS 696/parks.csv', low_memory=False)
parks.head()
species1 = species[(species.Occurrence=='Present') & (species.Record_Status
=='Approved')]
species2 = species1.drop(['Order', 'Family', 'Scientific_Name', 'Abundance', 'Seasonality', 'Conservation_Status'], axis=1)
species2.info()
species2["Park Name"].value_counts(sort=True)
parkshort = parks.drop(['State', 'Acres', 'Park Code'], axis=1)
parkshort.info()
species.info()
parks.info()
crs={'init':'epsg:4326'}
base = gpd.read_file("C:/Users/Kaytie/Desktop/shp/us500.shp")
type(base)
base.head()
base.info()
base.plot()
geometry = [Point(xy) for xy in zip(parks["Longitude"], parks["Latitude"])]
geodata = gpd.GeoDataFrame(parkshort, crs=crs, geometry=geometry)
geodata.plot()
fig, ax = plt.subplots(figsize=(4,4))
base.plot(ax=ax, facecolor='cadetblue', edgecolor='w', alpha=1,linewidth=1)
geodata.plot(ax=ax, color='black', markersize=5);
fig.suptitle('National Parks of the United States', fontsize=12)
plt.xlim([-168,-67])
plt.savefig("parkgeodata")
parks_sorted=parks.sort_values('Acres', ascending=False)
parks_sorted
plt.figure(figsize=(20,20))
plt.barh(parks_sorted['Park Name'], parks_sorted['Acres'], color='#86bf91', height=0.85)
plt.ylabel("National Park", fontsize='14', fontweight='bold', rotation=45)
plt.xlabel("Thousands of Acres", fontsize='14', fontweight='bold')
plt.xticks(fontsize='10')
plt.yticks(fontsize='10')
plt.margins(y=0.01)
plt.title("The Acreage of National Parks in the United States", loc='left', fontsize='20', fontweight='bold')
plt.gcf().subplots_adjust(bottom=0.35, top=0.7)
plt.tight_layout()
plt.savefig("NPAcreagetotal.png", bbox_inches='tight', pad_inches=0.0)
plt.show()
park5 = parks.drop(['State', 'Park Code', 'Latitude', 'Longitude'], axis=1)
park5.info()
dfpark5 = park5
dfparkt5=dfpark5.sort_values(by=['Acres'], ascending=False).head(5)
dfparkb5=dfpark5.sort_values(by=['Acres'], ascending=False).tail(5)
dfmerge1=pd.concat([dfparkt5,dfparkb5])
dfmerge1
dfmerge1.plot.bar(x='Park Name',y='Acres', figsize=(20,20), legend=None, color='#86bf91')
plt.title('A Comparison of the 5 Largest and 5 Smallest National Parks by Acreage', loc='left', fontsize=20, fontweight='bold')
plt.gcf().subplots_adjust(bottom=0.35, top=0.7)
plt.subplots_adjust(bottom=0.5)
plt.xticks(rotation=45, fontsize='10', ha='right')
plt.yticks(fontsize='10')
plt.xlabel('National Park', fontsize='14', fontweight='bold')
plt.ylabel('Millions of Acres', labelpad=50, rotation=45, fontsize='14', fontweight='bold')
plt.tight_layout()
plt.savefig("5acres.png")
plt.show()
species2.head()
totalspecies=species2["Park Name"].value_counts(sort=True)
totalspecies
totalspecies.plot.barh(y='Park Name', figsize=(20,20), width=0.85, color='#9370db')
plt.title('Total Species Report Count from National Parks', loc='left', fontsize=20, fontweight='bold')
plt.subplots_adjust(left=0.3)
plt.xticks(fontsize='10')
plt.yticks(fontsize='10')
plt.xlabel('Number of Reports', labelpad=10, fontsize='14', fontweight='bold')
plt.ylabel('National Park', labelpad=50, rotation=45, fontsize='14', fontweight='bold')
plt.tight_layout()
plt.savefig("totalspeciescount.png")
plt.show()
species3 = species2.drop(['Species_ID','Category','Record_Status'], axis=1)
species3.info()
dfspeciest5 = species3.value_counts(['Park Name'], ascending=False).head(5)
dfspeciesb5 = species3.value_counts(['Park Name'], ascending=False).tail(5)
dfmerge2 = pd.concat([dfspeciest5,dfspeciesb5])
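# Presumably dfmerge2 is then plotted like dfmerge1 above; a sketch of that figure
# (title and output file name are assumptions):
dfmerge2.plot.barh(figsize=(20,20), color='#9370db')
plt.title('A Comparison of the 5 Most and 5 Least Reported National Parks by Species Count', loc='left', fontsize=20, fontweight='bold')
plt.xlabel('Number of Reports', fontsize='14', fontweight='bold')
plt.ylabel('National Park', labelpad=50, rotation=45, fontsize='14', fontweight='bold')
plt.tight_layout()
plt.savefig("5speciescount.png")
plt.show()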
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
import joblib  # sklearn.externals.joblib was removed in recent scikit-learn
#from sklearn.externals import joblib
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
def real_FE():
return 0
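# Illustrative usage of the caching behaviour of FEbase.create() (paths are
# hypothetical, added as a sketch): the output CSV is named after the subclass,
# e.g. '<dataset>_FE_a23.csv', and core() is skipped if that file already exists.
def _demo_febase_create():
    fe = FE_a23()
    out_path = fe.create('stockdata/daily.csv', 'stockdata/adj.csv',
                         'stockdata/limit.csv', 'stockdata/moneyflow.csv',
                         'stockdata/daily_basic.csv')
    print(out_path)   # e.g. 'stockdata/daily_FE_a23.csv'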
class FEg30eom0110network(FEbase):
#this version changes to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#whether the stock is ST or otherwise restricted
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##exclude the STAR Market (688xxx) board
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price after applying the adjustment factor (adjusted close)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#whether the stock hit the daily up-limit (trading effectively halted)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguish high vs. low absolute share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute ranks of the three price ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
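# The core() methods in this family expect five CSVs, inferred from the column
# usage above (a sketch; the example paths are hypothetical):
#   DataSetName[0]  daily bars   (open/high/low/close/pre_close/pct_chg/vol/amount)
#   DataSetName[1]  adjustment factors (adj_factor)
#   DataSetName[2]  daily price limits (up_limit/down_limit)
#   DataSetName[3]  moneyflow    (buy_sm_amount, sell_sm_amount, ...)
#   DataSetName[4]  daily basics (turnover_rate, pe, pb, ps_ttm, total_mv, circ_mv, ...)
# e.g. FEg30eom0110onlinew6d().create('d/daily.csv', 'd/adj.csv', 'd/limit.csv',
#                                     'd/moneyflow.csv', 'd/daily_basic.csv')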
class FEg30eom0110onlinew6d(FEbase):
#this version changes to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#whether the stock is ST or otherwise restricted
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##exclude the STAR Market (688xxx) board
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price after applying the adjustment factor (adjusted close)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#whether the stock hit the daily up-limit (trading effectively halted)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguish high vs. low absolute share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute ranks of the three price ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FE_a23(FEbase):
#this version changes to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#whether the stock is ST or otherwise restricted
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude the STAR Market (688xxx) board
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price after applying the adjustment factor (adjusted close)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Flag days at (or near) the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (to distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
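# Cumulative pct_chg over 3/6/12/24 days, both raw and cross-sectionally ranked (momentum features)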
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Percentile ranks of the three intraday ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
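# Label: forward trend over the next 5 trading days (FEsingle.PredictDaysTrend)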
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose share price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns/rows not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned version for the new model (live feature build)
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
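# Net money flow by order size: small orders (sm_amount) and large orders (lg_amount); net_mf_amount comes directly from the source data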
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
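# 20-day rolling percentile of each flow within a stock, then shift everything by one day so today's row only uses data already available (no lookahead)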
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#Question mark here: to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # cast ts_code from its original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # left-pad to 6 digits; the .str accessor is required here
print(df_all)
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
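# The three lines above rebuild today's adjusted price from yesterday's adjusted close and today's pct_chg, presumably because today's adj_factor may not be published yet (missing factors are filled with 0)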
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#Flag days at (or near) the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#Percentile ranks of the three intraday ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#Drop low-priced stocks (comment originally said "low market cap"; the filter below is on close price)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#Columns/rows not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
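# Keep only the most recent trading day so the exported file holds just today's live features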
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29(FEbase):
#This version was intended to switch to a 3-day prediction horizon (core() below still calls PredictDaysTrend with a 5-day window)
def __init__(self):
pass
def core(self,DataSetName):
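# DataSetName: five CSV paths: [0] daily bars, [1] adjustment factors, [2] price limits, [3] money flow, [4] long-horizon features (inferred from how they are read below)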
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Flag days at (or near) the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (to distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Percentile ranks of the three intraday ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose share price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns/rows not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned version for the new model (live feature build)
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#Question mark here: to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # cast ts_code from its original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # left-pad to 6 digits; the .str accessor is required here
print(df_all)
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#Flag days at (or near) the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#Percentile ranks of the three intraday ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#Drop low-priced stocks (comment originally said "low market cap"; the filter below is on close price)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#Columns/rows not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_Volatility(FEbase):
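# NOTE: in this section the pipeline below appears identical to FE_a29; any volatility-specific labelling is not visible here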
#This version was intended to switch to a 3-day prediction horizon (core() below still calls PredictDaysTrend with a 5-day window)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Flag days at (or near) the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (to distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Percentile ranks of the three intraday ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose share price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns/rows not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned version for the new model (live feature build)
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#Question mark here: to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # cast ts_code from its original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # left-pad to 6 digits; the .str accessor is required here
print(df_all)
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#Flag days at (or near) the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#Percentile ranks of the three intraday ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#Drop low-priced stocks (comment originally said "low market cap"; the filter below is on close price)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#Columns/rows not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31(FEbase):
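# Same pipeline as FE_a29; the main visible difference is that core() restricts the universe to small caps (total_mv_rank < 6)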
#This version was intended to switch to a 3-day prediction horizon (core() below still calls PredictDaysTrend with a 5-day window)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Flag days at (or near) the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (to distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Percentile ranks of the three intraday ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose share price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns/rows not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned version for the new model (live feature build)
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here (to be confirmed)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor prefix is required for string methods
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#whether the stock hit its daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute ranks for the three ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose market value is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31_full(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#whether the stock is ST or otherwise special
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#whether the stock hit its daily price limit
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguish actual high vs. low price levels)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute ranks for the three ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here (to be confirmed)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor prefix is required for string methods
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#whether the stock hit its daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute ranks for the three ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose market value is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_full(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#whether the stock is ST or otherwise special
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#whether the stock hit its daily price limit
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguish actual high vs. low price levels)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute ranks for the three ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here (to be confirmed)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor prefix is required for string methods
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#whether the stock hit its daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute ranks for the three ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose market value is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_qliba2(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
df_all=FEsingle.PredictDaysTrend(df_all,5)
print(df_all)
df_all=df_all.loc[:,['ts_code','trade_date','tomorrow_chg','tomorrow_chg_rank']]
print(df_all.dtypes)
print(df_all)
#===================================================================================================================================#
#load qlib features
###df_qlib_1=pd.read_csv('zzztest.csv',header=0)
###df_qlib_2=pd.read_csv('zzztest2.csv',header=0)
##df_qlib_1=pd.read_csv('2013.csv',header=0)
###df_qlib_1=df_qlib_1.iloc[:,0:70]
##df_qlib_all_l=df_qlib_1.iloc[:,0:2]
##df_qlib_all_r=df_qlib_1.iloc[:,70:]
##df_qlib_1 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##print(df_qlib_1.head(10))
##df_qlib_2=pd.read_csv('2015.csv',header=0)
##df_qlib_all_l=df_qlib_2.iloc[:,0:2]
##df_qlib_all_r=df_qlib_2.iloc[:,70:]
##df_qlib_2 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_3=pd.read_csv('2017.csv',header=0)
##df_qlib_all_l=df_qlib_3.iloc[:,0:2]
##df_qlib_all_r=df_qlib_3.iloc[:,70:]
##df_qlib_3 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_4=pd.read_csv('2019.csv',header=0)
##df_qlib_all_l=df_qlib_4.iloc[:,0:2]
##df_qlib_all_r=df_qlib_4.iloc[:,70:]
##df_qlib_4 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_all=pd.concat([df_qlib_2,df_qlib_1])
##df_qlib_all=pd.concat([df_qlib_3,df_qlib_all])
##df_qlib_all=pd.concat([df_qlib_4,df_qlib_all])
##df_qlib_all.drop_duplicates()
##print(df_qlib_all.head(10))
##df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
##df_qlib_all.to_csv("13to21_first70plus.csv")
df_qlib_all=pd.read_csv('13to21_first70plus.csv',header=0)
#df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
print(df_qlib_all)
df_qlib_all.rename(columns={'datetime':'trade_date','instrument':'ts_code','score':'mix'}, inplace = True)
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all['trade_date'] = pd.to_datetime(df_qlib_all['trade_date'], format='%Y-%m-%d')
df_qlib_all['trade_date']=df_qlib_all['trade_date'].apply(lambda x: x.strftime('%Y%m%d'))
df_qlib_all['trade_date'] = df_qlib_all['trade_date'].astype(int)
df_qlib_all['ts_codeL'] = df_qlib_all['ts_code'].str[:2]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_code'].str[2:]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_codeR'].apply(lambda s: s+'.')
df_qlib_all['ts_code']=df_qlib_all['ts_codeR'].str.cat(df_qlib_all['ts_codeL'])
df_qlib_all.drop(['ts_codeL','ts_codeR'],axis=1,inplace=True)
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all=df_qlib_all.fillna(value=0)
df_all=pd.merge(df_all, df_qlib_all, how='left', on=['ts_code','trade_date'])
print(df_all)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all= | pd.read_csv('real_adj_now.csv',index_col=0,header=0) | pandas.read_csv |
import data_helpers_neutrals
import pandas as pd
import numpy as np
#from keras.preprocessing import sequence
#from gensim.models import word2vec
#from os.path import join, exists, split
#import os
#import h5py
from keras.models import load_model
"""
This module gives a prediction (between 0 and 1) for a set of input sentences and calculates accuracy on the whole set.
INPUT: a keras model, a vocabulary dictionary and a file with tweets to test
OUTPUT: accuracy
"""
# import vocabulary
input_data = np.load('C:/AI/eh_CNN_twitter/data/semeval/input_data.npy')
#x_train = input_data[0]
#y_train = input_data[1]
#x_test = input_data[2]
#y_test = input_data[3]
vocabulary_inv = input_data[4]
#neutral_tweets = input_data[5]
vocabulary = {y:x for x,y in vocabulary_inv.items()}
# import CNN model
model = load_model('C:/AI/eh_CNN_twitter/models/mixedTest.h5')
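# A minimal sketch of the accuracy step promised in the docstring, assuming the
# commented-out slots input_data[2] / input_data[3] hold the padded test
# sequences and their labels (an assumption -- those lines are disabled above):
#
#   x_test = input_data[2]
#   y_test = input_data[3]
#   probs = model.predict(np.array(x_test), batch_size=32)   # sigmoid scores in [0, 1]
#   preds = (probs.ravel() > 0.5).astype(int)                 # threshold at 0.5
#   truth = np.argmax(y_test, axis=1) if np.ndim(y_test) > 1 else np.asarray(y_test)
#   print('accuracy on whole set:', np.mean(preds == truth))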
def label_tweets(comp_name):
""" this function uses the trained model and labels new tweets."""
# import company tweets
fpath = 'C:/AI/eh_CNN_twitter/data/comp_tweets/'
comp_tweets = pd.read_csv(fpath+comp_name+'.tsv', sep='\t', encoding = 'utf-8', header=None, error_bad_lines=False)#, nrows = 10)
comp_tweets_original = comp_tweets.copy(deep= True)
comp_tweets = comp_tweets.iloc[:,5]
comp_tweets = [str(s).strip() for s in comp_tweets]
comp_tweets = [data_helpers_neutrals.clean_str(sent) for sent in comp_tweets]
comp_tweets = [s.split(" ") for s in comp_tweets]
comp_tweets = pad_tweets(comp_tweets)
print(comp_name, ': cleaned and padded')
comp_tweets = [[vocabulary[word] for word in tweet] for tweet in comp_tweets]
y = model.predict(np.array(comp_tweets), batch_size=1)
print(comp_name, ':prediction done!')
comp_tweets_labeled = | pd.DataFrame(y) | pandas.DataFrame |
import pandas as pd
import numpy as np
import joblib
import os
import argparse
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import accuracy_score
# load the titanic dataset
train_data = | pd.read_csv("titanic.csv") | pandas.read_csv |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for PySMO's family of SurrogateTrainer (PysmoPolyTrainer, PysmoRBFTrainer and PysmoKrigingTrainer)
"""
import pytest
import numpy as np
import pandas as pd
import io
import os
from math import sin, cos, log, exp
from pathlib import Path
from io import StringIO
import re
import pyomo as pyo
from pyomo.environ import ConcreteModel, Var, Constraint
from pyomo.common.tempfiles import TempfileManager
from idaes.core.surrogate.pysmo import (
polynomial_regression as pr,
radial_basis_function as rbf,
kriging as krg,
)
from idaes.core.surrogate.pysmo_surrogate import (
PysmoTrainer,
PysmoPolyTrainer,
PysmoRBFTrainer,
PysmoKrigingTrainer,
PysmoSurrogate,
PysmoSurrogateTrainingResult,
PysmoTrainedSurrogate,
)
from idaes.core.surrogate.surrogate_block import SurrogateBlock
from idaes.core.util.exceptions import ConfigurationError
from idaes.core.surrogate.metrics import compute_fit_metrics
dirpath = Path(__file__).parent.resolve()
# String representation of json output for testing
jstring_poly_1 = (
'{"model_encoding": '
'{"z1": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 1, "additional_term_expressions": [], '
'"optimal_weights_array": [[-75.26111111111476], [-8.815277777775934], [18.81527777777826], [-2.2556956302821618e-13]], '
'"final_polynomial_order": 1, '
'"errors": {"MAE": 3.772981926886132e-13, "MSE": 1.5772926701095834e-25, "R2": 1.0, "Adjusted R2": 1.0},'
' "extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "list", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1"], '
'"input_bounds": {"x1": [0, 5], "x2": [0, 10]}, '
'"surrogate_type": "poly"}'
)
jstring_poly_2 = (
'{"model_encoding": '
'{"z1": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 1, "additional_term_expressions": [], '
'"optimal_weights_array": [[-75.26111111111476], [-8.815277777775934], [18.81527777777826], [-2.2556956302821618e-13]], '
'"final_polynomial_order": 1, '
'"errors": {"MAE": 3.772981926886132e-13, "MSE": 1.5772926701095834e-25, "R2": 1.0, "Adjusted R2": 1.0},'
' "extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "list", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}, '
'"z2": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 1, "additional_term_expressions": [], '
'"optimal_weights_array": [[-3.0033074724377813], [0.2491731318906352], [1.7508268681094337], [-6.786238238021269e-15]], '
'"final_polynomial_order": 1, "errors": {"MAE": 1.1901590823981678e-14, "MSE": 1.5225015470765528e-28, "R2": 1.0, "Adjusted R2": 1.0}, '
'"extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "list", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1", "z2"], '
'"input_bounds": null, '
'"surrogate_type": "poly"}'
)
jstring_poly_3 = (
'{"model_encoding": '
'{"z1": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 0, "additional_term_expressions": ["log(IndexedParam[x1])", "sin(IndexedParam[x2])"], '
'"optimal_weights_array": [[-14.290243902439855], [6.4274390243899795], [3.572560975609962], [1.9753643165643098e-13], [-4.4048098502003086e-14]], '
'"final_polynomial_order": 1, '
'"errors": {"MAE": 1.4210854715202004e-14, "MSE": 2.8188629679897487e-28, "R2": 1.0},'
' "extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "list", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}, '
'"z2": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 0, "additional_term_expressions": ["log(IndexedParam[x1])", "sin(IndexedParam[x2])"], '
'"optimal_weights_array": [[5.704971042443143], [2.4262427606248815], [-0.42624276060821653], [-5.968545102597034e-11], [6.481176706429892e-12]], '
'"final_polynomial_order": 1, "errors": {"MAE": 3.869645344896829e-12, "MSE": 7.189162598662876e-23, "R2": 1.0}, '
'"extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "list", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1", "z2"], '
'"input_bounds": null, '
'"surrogate_type": "poly"}'
)
jstring_poly_4 = (
'{"model_encoding": '
'{"z1": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 0, "additional_term_expressions": ["IndexedParam[x1]/IndexedParam[x2]"], '
'"optimal_weights_array": [[-110.15000000001504], [-17.53750000000189], [27.537500000006148], [-5.3967136315336006e-11]], '
'"final_polynomial_order": 1, '
'"errors": {"MAE": 1.0317080523236656e-12, "MSE": 2.126880072091303e-24, "R2": 1.0},'
' "extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "other", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}, '
'"z2": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 0, "additional_term_expressions": ["IndexedParam[x1]/IndexedParam[x2]"], '
'"optimal_weights_array": [[-12.523574144487087], [-2.1308935361219556], [4.1308935361216435], [3.6347869158959156e-12]], '
'"final_polynomial_order": 1, "errors": {"MAE": 7.762679388179095e-14, "MSE": 6.506051429719772e-27, "R2": 1.0}, '
'"extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "other", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1", "z2"], '
'"input_bounds": null, '
'"surrogate_type": "poly"}'
)
jstring_rbf = (
'{"model_encoding": '
'{"z1": {"attr": {"x_data_columns": ["x1", "x2"], '
'"x_data": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"centres": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"basis_function": "gaussian", '
'"weights": [[-69.10791015625], [-319807.1317138672], [959336.2551269531], [-959973.7440185547], [320514.66677856445]], '
'"sigma": 0.05, "regularization_parameter": 0.0, '
'"rmse": 0.0005986693684275349, "R2": 0.9999971327598984, '
'"x_data_min": [[1, 5]], "x_data_max": [[5, 9]], "y_data_min": [10], "y_data_max": [50]}, '
'"map": {"x_data_columns": "list", "x_data": "numpy", "centres": "numpy", '
'"basis_function": "str", "weights": "numpy", "sigma": "str", "regularization_parameter": "str", '
'"rmse": "str", "R2": "str", "x_data_min": "numpy", "x_data_max": "numpy", "y_data_min": "numpy", '
'"y_data_max": "numpy"}}, '
'"z2": {"attr": {"x_data_columns": ["x1", "x2"], '
'"x_data": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"centres": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"basis_function": "gaussian", "weights": [[-69.10791015625], [-319807.1317138672], [959336.2551269531], [-959973.7440185547], [320514.66677856445]], '
'"sigma": 0.05, "regularization_parameter": 0.0, '
'"rmse": 0.0005986693684275349, "R2": 0.9999971327598984, '
'"x_data_min": [[1, 5]], "x_data_max": [[5, 9]], "y_data_min": [6], "y_data_max": [14]}, '
'"map": {"x_data_columns": "list", "x_data": "numpy", "centres": "numpy", '
'"basis_function": "str", "weights": "numpy", "sigma": "str", "regularization_parameter": "str", '
'"rmse": "str", "R2": "str", "x_data_min": "numpy", "x_data_max": "numpy", "y_data_min": "numpy", '
'"y_data_max": "numpy"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1", "z2"], '
'"input_bounds": {"x1": [0, 5], "x2": [0, 10]}, '
'"surrogate_type": "rbf"}'
)
jstring_krg = (
'{"model_encoding": '
'{"z1": {"attr": {"x_data_columns": ["x1", "x2"], '
'"x_data": [[1, 5], [2, 6], [3, 7], [4, 8], [5, 9]], "x_data_min": [[1, 5]], "x_data_max": [[5, 9]], '
'"x_data_scaled": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"optimal_weights": [0.027452451845611077, 0.0010443446337808024], '
'"optimal_p": 2, "optimal_mean": [[30.00000000077694]], "optimal_variance": [[6503.3113222215325]], '
'"regularization_parameter": 1.000000000001e-06, '
'"optimal_covariance_matrix": [[1.000001, 0.9982205353479938, 0.9929011178300284, 0.9840983398813247, 0.971905407660152], '
"[0.9982205353479938, 1.000001, 0.9982205353479938, 0.9929011178300284, 0.9840983398813247], "
"[0.9929011178300284, 0.9982205353479938, 1.000001, 0.9982205353479938, 0.9929011178300284], "
"[0.9840983398813247, 0.9929011178300284, 0.9982205353479938, 1.000001, 0.9982205353479938], "
"[0.971905407660152, 0.9840983398813247, 0.9929011178300284, 0.9982205353479938, 1.000001]], "
'"covariance_matrix_inverse": [[108728.9916945844, -240226.85108007095, 82932.18571364644, 121970.72026795016, -73364.51387189297], '
"[-240226.85108202277, 589985.9891969847, -341158.67300272395, -130592.8567227173, 121970.72027126199], "
"[82932.18571952915, -341158.67301448685, 516416.75018761755, -341158.6729826693, 82932.18570353556], "
"[121970.72026201998, -130592.85670691582, -341158.6729945546, 589985.9891699858, -240226.8510697507], "
"[-73364.51386989365, 121970.72026527137, 82932.18570954115, -240226.85107176506, 108728.99169106234]], "
'"optimal_y_mu": [[-20.00000000077694], [-10.00000000077694], [-7.769394017032027e-10], [9.99999999922306], [19.99999999922306]], '
'"training_R2": 0.9999962956016578, "training_rmse": 0.02721910484270722}, '
'"map": {"x_data_columns": "list", "x_data": "numpy", "x_data_min": "numpy", "x_data_max": "numpy", '
'"x_data_scaled": "numpy", "optimal_weights": "numpy", "optimal_p": "str", "optimal_mean": "numpy", '
'"optimal_variance": "numpy", "regularization_parameter": "str", "optimal_covariance_matrix": "numpy", '
'"covariance_matrix_inverse": "numpy", "optimal_y_mu": "numpy", "training_R2": "str", "training_rmse": "str"}}, '
'"z2": {"attr": {"x_data_columns": ["x1", "x2"], '
'"x_data": [[1, 5], [2, 6], [3, 7], [4, 8], [5, 9]], "x_data_min": [[1, 5]], "x_data_max": [[5, 9]], '
'"x_data_scaled": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"optimal_weights": [0.02749666901085125, 0.001000000000000049], '
'"optimal_p": 2, "optimal_mean": [[9.999999999902883]], "optimal_variance": [[260.13320726701056]], '
'"regularization_parameter": 1e-06, '
'"optimal_covariance_matrix": [[1.000001, 0.998220543300601, 0.9929011494709431, 0.9840984104422155, 0.9719055315475238], '
"[0.998220543300601, 1.000001, 0.998220543300601, 0.9929011494709431, 0.9840984104422155], "
"[0.9929011494709431, 0.998220543300601, 1.000001, 0.998220543300601, 0.9929011494709431], "
"[0.9840984104422155, 0.9929011494709431, 0.998220543300601, 1.000001, 0.998220543300601], "
"[0.9719055315475238, 0.9840984104422155, 0.9929011494709431, 0.998220543300601, 1.000001]], "
'"covariance_matrix_inverse": [[108729.13455237681, -240227.09704128528, 82932.15558036882, 121970.94143487987, -73364.601633614], '
"[-240227.0970392892, 589986.4681472526, -341158.6596781079, -130593.32427863385, 121970.94144222786], "
"[82932.15557448889, -341158.6596663887, 516416.7835787105, -341158.659633822, 82932.15555811858], "
"[121970.94144067129, -130593.32429416949, -341158.6596220617, 589986.4680877628, -240227.09701875152], "
"[-73364.60163552182, 121970.94144804058, 82932.15555219717, -240227.09701673465, 108729.13454474375]], "
'"optimal_y_mu": [[-3.999999999902883], [-1.999999999902883], [9.711698112369049e-11], [2.000000000097117], [4.000000000097117]], '
'"training_R2": 0.9999962956250228, "training_rmse": 0.005443803800474329}, '
'"map": {"x_data_columns": "list", "x_data": "numpy", "x_data_min": "numpy", "x_data_max": "numpy", '
'"x_data_scaled": "numpy", "optimal_weights": "numpy", "optimal_p": "str", "optimal_mean": "numpy", '
'"optimal_variance": "numpy", "regularization_parameter": "str", "optimal_covariance_matrix": "numpy", '
'"covariance_matrix_inverse": "numpy", "optimal_y_mu": "numpy", "training_R2": "str", "training_rmse": "str"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1", "z2"], '
'"input_bounds": {"x1": [0, 5], "x2": [0, 10]}, '
'"surrogate_type": "kriging"}'
)
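# Illustrative sketch (not part of the original test suite): the jstring_*
# constants above are plain JSON, so their layout can be inspected directly.
# The helper name below is made up for this example.
def _peek_encoded_surrogate(jstring):
    import json

    decoded = json.loads(jstring)
    # "model_encoding" maps each output label to its serialized attributes,
    # while "surrogate_type" records which PySMO trainer produced the model.
    return decoded["surrogate_type"], list(decoded["model_encoding"].keys())
# e.g. _peek_encoded_surrogate(jstring_krg) gives ("kriging", ["z1", "z2"])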
class TestSurrogateTrainingResult:
@pytest.fixture
def pysmo_output_pr(self):
data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
}
data = pd.DataFrame(data)
init_pr = pr.PolynomialRegression(
data, data, maximum_polynomial_order=1, overwrite=True, multinomials=True
)
vars = init_pr.get_feature_vector()
init_pr.training()
return init_pr, vars
@pytest.fixture
def pysmo_output_rbf(self):
data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
}
data = pd.DataFrame(data)
init_rbf = rbf.RadialBasisFunctions(
data, basis_function="linear", overwrite=True
)
vars = init_rbf.get_feature_vector()
init_rbf.training()
return init_rbf, vars
@pytest.fixture
def pysmo_output_krg(self):
data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
}
data = pd.DataFrame(data)
init_krg = krg.KrigingModel(data, numerical_gradients=True, overwrite=True)
vars = init_krg.get_feature_vector()
init_krg.training()
return init_krg, vars
@pytest.mark.unit
def test_init(self):
init_func = PysmoSurrogateTrainingResult()
assert init_func.metrics == {}
assert init_func._model == None
assert init_func.expression_str == ""
@pytest.mark.unit
def test_model_poly(self, pysmo_output_pr):
out1, vars = pysmo_output_pr
init_func_poly = PysmoSurrogateTrainingResult()
init_func_poly.model = out1
assert init_func_poly.expression_str == str(
out1.generate_expression([vars[i] for i in vars.keys()])
)
assert init_func_poly._model is not None
assert isinstance(init_func_poly._model, pr.PolynomialRegression)
assert init_func_poly._model == out1
@pytest.mark.unit
def test_model_rbf(self, pysmo_output_rbf):
out2, vars = pysmo_output_rbf
init_func_rbf = PysmoSurrogateTrainingResult()
init_func_rbf.model = out2
assert init_func_rbf.expression_str == str(
out2.generate_expression([vars[i] for i in vars.keys()])
)
assert init_func_rbf._model is not None
assert isinstance(init_func_rbf._model, rbf.RadialBasisFunctions)
assert init_func_rbf._model == out2
@pytest.mark.unit
def test_model_krg(self, pysmo_output_krg):
out3, vars = pysmo_output_krg
init_func_krg = PysmoSurrogateTrainingResult()
init_func_krg.model = out3
assert init_func_krg.expression_str == str(
out3.generate_expression([vars[i] for i in vars.keys()])
)
assert init_func_krg._model is not None
assert isinstance(init_func_krg._model, krg.KrigingModel)
assert init_func_krg._model == out3
class TestTrainedSurrogate:
@pytest.fixture
def pysmo_outputs(self):
data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
}
data = pd.DataFrame(data)
init_pr = pr.PolynomialRegression(
data, data, maximum_polynomial_order=1, overwrite=True, multinomials=True
)
vars = init_pr.get_feature_vector()
init_pr.training()
init_rbf = rbf.RadialBasisFunctions(
data, basis_function="linear", overwrite=True
)
init_rbf.get_feature_vector()
init_rbf.training()
init_krg = krg.KrigingModel(data, numerical_gradients=True, overwrite=True)
init_krg.get_feature_vector()
init_krg.training()
return init_pr, init_rbf, init_krg, vars
@pytest.mark.unit
def test_init(self):
init_func = PysmoTrainedSurrogate()
assert init_func._data == {}
assert init_func.model_type == ""
assert init_func.num_outputs == 0
assert init_func.output_labels == []
assert init_func.input_labels == None
assert init_func.input_bounds == None
init_func1 = PysmoTrainedSurrogate(model_type="poly")
assert init_func1._data == {}
assert init_func1.model_type == "poly"
assert init_func1.num_outputs == 0
assert init_func1.output_labels == []
assert init_func1.input_labels == None
assert init_func1.input_bounds == None
@pytest.mark.unit
def test_add_result(self, pysmo_outputs):
# These need to be tested this way to make sure ``add_result`` builds out the model object properly.
out1, out2, out3, vars = pysmo_outputs
init_func = PysmoTrainedSurrogate()
outvar = "z1"
init_func.add_result(outvar, out1)
assert init_func.output_labels == ["z1"]
assert init_func._data[outvar] == out1
outvar = "z2"
init_func.add_result(outvar, out2)
assert init_func.output_labels == ["z1", "z2"]
assert init_func._data[outvar] == out2
outvar = "z3"
init_func.add_result(outvar, out3)
assert init_func.output_labels == ["z1", "z2", "z3"]
assert init_func._data[outvar] == out3
@pytest.mark.unit
def test_get_result(self, pysmo_outputs):
out1, out2, out3, vars = pysmo_outputs
init_func = PysmoTrainedSurrogate()
outvar = "z1"
init_func.add_result(outvar, out1)
outvar = "z2"
init_func.add_result(outvar, out2)
outvar = "z3"
init_func.add_result(outvar, out3)
for i in range(len(init_func.output_labels)):
assert init_func.get_result(init_func.output_labels[i]) == pysmo_outputs[i]
class TestPysmoPolyTrainer:
@pytest.fixture
def pysmo_poly_trainer(self):
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
input_labels = ["x1", "x2"]
output_labels = ["z1"]
bnds = {"x1": (0, 5), "x2": (0, 10)}
poly_trainer = PysmoPolyTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=data,
)
return poly_trainer
@pytest.mark.unit
def test_defaults(self, pysmo_poly_trainer):
# Check all defaults
assert pysmo_poly_trainer.model_type == "poly"
assert pysmo_poly_trainer.config.maximum_polynomial_order == None
assert pysmo_poly_trainer.config.multinomials == False
assert pysmo_poly_trainer.config.number_of_crossvalidations == 3
assert pysmo_poly_trainer.config.training_split == 0.8
assert pysmo_poly_trainer.config.solution_method == None
assert pysmo_poly_trainer.config.extra_features == None
@pytest.mark.unit
def test_set_polynomial_order_righttype(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.maximum_polynomial_order = 3
assert pysmo_poly_trainer.config.maximum_polynomial_order == 3
@pytest.mark.unit
def test_set_polynomial_order_wrongtype(self, pysmo_poly_trainer):
with pytest.raises(
ValueError,
match="invalid value for configuration 'maximum_polynomial_order'",
):
pysmo_poly_trainer.config.maximum_polynomial_order = 3.1
@pytest.mark.unit
def test_set_polynomial_order_wrongbounds(self, pysmo_poly_trainer):
with pytest.raises(
ValueError,
match="invalid value for configuration 'maximum_polynomial_order'",
):
pysmo_poly_trainer.config.maximum_polynomial_order = 0
@pytest.mark.unit
def test_set_number_of_crossvalidations_righttype(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.number_of_crossvalidations = 5
assert pysmo_poly_trainer.config.number_of_crossvalidations == 5
@pytest.mark.unit
def test_set_number_of_crossvalidations_wrongtype(self, pysmo_poly_trainer):
with pytest.raises(
ValueError,
match="invalid value for configuration 'number_of_crossvalidations'",
):
pysmo_poly_trainer.config.number_of_crossvalidations = 3.1
@pytest.mark.unit
def test_set_number_of_crossvalidations_wrongbounds(self, pysmo_poly_trainer):
with pytest.raises(
ValueError,
match="invalid value for configuration 'number_of_crossvalidations'",
):
pysmo_poly_trainer.config.number_of_crossvalidations = 0
@pytest.mark.unit
def test_set_training_split_righttype(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.training_split = 0.5
assert pysmo_poly_trainer.config.training_split == 0.5
@pytest.mark.unit
def test_set_training_split_wrongbounds(self, pysmo_poly_trainer):
with pytest.raises(
ValueError, match="invalid value for configuration 'training_split'"
):
pysmo_poly_trainer.config.training_split = -0.5
@pytest.mark.unit
def test_set_solution_method_righttype_1(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.solution_method = "mle"
assert pysmo_poly_trainer.config.solution_method == "mle"
@pytest.mark.unit
def test_set_solution_method_righttype_2(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.solution_method = "pyomo"
assert pysmo_poly_trainer.config.solution_method == "pyomo"
@pytest.mark.unit
def test_set_solution_method_righttype_3(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.solution_method = "bfgs"
assert pysmo_poly_trainer.config.solution_method == "bfgs"
@pytest.mark.unit
def test_set_solution_method_wrongtype(self, pysmo_poly_trainer):
with pytest.raises(
ValueError, match="invalid value for configuration 'solution_method'"
):
pysmo_poly_trainer.config.solution_method = "bfgh"
@pytest.mark.unit
def test_set_multinomials_righttype_1(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = True
assert pysmo_poly_trainer.config.multinomials == True
@pytest.mark.unit
def test_set_multinomials_righttype_2(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = False
assert pysmo_poly_trainer.config.multinomials == False
@pytest.mark.unit
def test_set_multinomials_righttype_3(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = "False"
assert pysmo_poly_trainer.config.multinomials == False
@pytest.mark.unit
def test_set_multinomials_righttype_4(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = "True"
assert pysmo_poly_trainer.config.multinomials == True
@pytest.mark.unit
def test_set_multinomials_righttype_5(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = 1
assert pysmo_poly_trainer.config.multinomials == True
@pytest.mark.unit
def test_set_multinomials_righttype_6(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = 0
assert pysmo_poly_trainer.config.multinomials == False
@pytest.mark.unit
def test_set_multinomials_wrongtype(self, pysmo_poly_trainer):
with pytest.raises(ValueError):
pysmo_poly_trainer.config.multinomials = 2
@pytest.mark.unit
def test_set_extra_features_righttype_2(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.extra_features = ["x1 / x2"]
assert pysmo_poly_trainer.config.extra_features == ["x1 / x2"]
@pytest.mark.unit
def test_set_extra_features_righttype_3(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.extra_features = ["x1 / x2", "sin(x1)"]
assert pysmo_poly_trainer.config.extra_features == ["x1 / x2", "sin(x1)"]
@pytest.mark.unit
def test_set_extra_features_wrongtype(self, pysmo_poly_trainer):
with pytest.raises(NameError):
pysmo_poly_trainer.config.extra_features = x1 / x2
@pytest.mark.unit
def test_set_extra_features_wrongtype_2(self, pysmo_poly_trainer):
with pytest.raises(ValueError):
pysmo_poly_trainer.config.extra_features = 10
@pytest.mark.unit
def test_create_model_no_extra_features(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = 1
pysmo_poly_trainer.config.maximum_polynomial_order = 1
pysmo_poly_trainer.config.solution_method = "mle"
pysmo_poly_trainer.config.number_of_crossvalidations = 2
pysmo_poly_trainer.config.training_split = 0.9
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
model = pysmo_poly_trainer._create_model(data, output_label)
assert (
model.max_polynomial_order
== pysmo_poly_trainer.config.maximum_polynomial_order
)
assert model.overwrite == True
assert model.multinomials == pysmo_poly_trainer.config.multinomials
assert model.solution_method == "mle"
assert (
model.number_of_crossvalidations
== pysmo_poly_trainer.config.number_of_crossvalidations
)
assert model.fraction_training == 0.9
assert model.filename == "solution.pickle"
assert model.number_of_x_vars == data.shape[1] - 1
assert model.additional_term_expressions == []
assert model.extra_terms_feature_vector == None
np.testing.assert_array_equal(model.original_data, data.values)
np.testing.assert_array_equal(model.regression_data, data.values)
assert model.regression_data_columns == data.columns.tolist()[:-1]
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
@pytest.mark.unit
def test_create_model_with_extra_features(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = 0
pysmo_poly_trainer.config.maximum_polynomial_order = 2
pysmo_poly_trainer.config.solution_method = "mle"
pysmo_poly_trainer.config.number_of_crossvalidations = 2
pysmo_poly_trainer.config.training_split = 0.9
pysmo_poly_trainer.config.extra_features = [
"sin(x1)/cos(x2)",
"log(x1)*sin(x2)",
"x1/x2",
]
output_label = "z1"
data = {
"x1": [1, 2, 3, 4, 5, 6, 7, 8],
"x2": [5, 6, 7, 8, 9, 10, 11, 12],
"z1": [10, 20, 30, 40, 50, 60, 70, 80],
}
data = pd.DataFrame(data)
model = pysmo_poly_trainer._create_model(data, output_label)
assert model.overwrite == True
assert model.multinomials == pysmo_poly_trainer.config.multinomials
assert model.solution_method == "mle"
assert (
model.number_of_crossvalidations
== pysmo_poly_trainer.config.number_of_crossvalidations
)
assert (
model.max_polynomial_order
== pysmo_poly_trainer.config.maximum_polynomial_order
)
assert model.fraction_training == 0.9
assert model.filename == "solution.pickle"
assert model.number_of_x_vars == data.shape[1] - 1
np.testing.assert_array_equal(model.original_data, data.values)
np.testing.assert_array_equal(model.regression_data, data.values)
assert model.regression_data_columns == data.columns.tolist()[:-1]
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
assert len(model.additional_term_expressions) == 3
assert isinstance(model.additional_term_expressions, list)
assert isinstance(
model.additional_term_expressions[0],
pyo.core.expr.numeric_expr.NPV_DivisionExpression,
)
assert isinstance(
model.additional_term_expressions[1],
pyo.core.expr.numeric_expr.ProductExpression,
)
assert isinstance(
model.additional_term_expressions[2],
pyo.core.expr.numeric_expr.NPV_DivisionExpression,
)
assert (
str(model.additional_term_expressions[0])
== "sin(IndexedParam[x1])/cos(IndexedParam[x2])"
)
assert (
str(model.additional_term_expressions[1])
== "log(IndexedParam[x1])*sin(IndexedParam[x2])"
)
assert (
str(model.additional_term_expressions[2])
== "IndexedParam[x1]/IndexedParam[x2]"
)
assert model.extra_terms_feature_vector == None
class TestPysmoRBFTrainer:
@pytest.fixture
def pysmo_rbf_trainer(self):
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
input_labels = ["x1", "x2"]
output_labels = ["z1"]
bnds = {"x1": (0, 5), "x2": (0, 10)}
rbf_trainer = PysmoRBFTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=data,
)
return rbf_trainer
@pytest.mark.unit
def test_defaults(self, pysmo_rbf_trainer):
# Check all defaults
assert pysmo_rbf_trainer.model_type == "None rbf"
assert pysmo_rbf_trainer.config.basis_function == None
assert pysmo_rbf_trainer.config.regularization == None
assert pysmo_rbf_trainer.config.solution_method == None
@pytest.mark.unit
def test_set_basis_function_righttype_1(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "linear"
assert pysmo_rbf_trainer.config.basis_function == "linear"
@pytest.mark.unit
def test_set_basis_function_righttype_2(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "cubic"
assert pysmo_rbf_trainer.config.basis_function == "cubic"
@pytest.mark.unit
def test_set_basis_function_righttype_3(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "imq"
assert pysmo_rbf_trainer.config.basis_function == "imq"
@pytest.mark.unit
def test_set_basis_function_righttype_4(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "mq"
assert pysmo_rbf_trainer.config.basis_function == "mq"
@pytest.mark.unit
def test_set_basis_function_righttype_5(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "gaussian"
assert pysmo_rbf_trainer.config.basis_function == "gaussian"
@pytest.mark.unit
def test_set_basis_function_righttype_6(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "spline"
assert pysmo_rbf_trainer.config.basis_function == "spline"
@pytest.mark.unit
def test_set_basis_function_outdomain(self, pysmo_rbf_trainer):
with pytest.raises(
ValueError, match="invalid value for configuration 'basis_function'"
):
pysmo_rbf_trainer.config.basis_function = "mqimq"
@pytest.mark.unit
def test_set_solution_method_righttype_1(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.solution_method = "algebraic"
assert pysmo_rbf_trainer.config.solution_method == "algebraic"
@pytest.mark.unit
def test_set_solution_method_righttype_2(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.solution_method = "pyomo"
assert pysmo_rbf_trainer.config.solution_method == "pyomo"
@pytest.mark.unit
def test_set_solution_method_righttype_3(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.solution_method = "bfgs"
assert pysmo_rbf_trainer.config.solution_method == "bfgs"
@pytest.mark.unit
def test_set_solution_method_wrongtype(self, pysmo_rbf_trainer):
with pytest.raises(
ValueError, match="invalid value for configuration 'solution_method'"
):
pysmo_rbf_trainer.config.solution_method = "mle"
@pytest.mark.unit
def test_set_regularization_righttype_1(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = True
assert pysmo_rbf_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_2(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = False
assert pysmo_rbf_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_righttype_3(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = "False"
assert pysmo_rbf_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_righttype_4(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = "True"
assert pysmo_rbf_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_5(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = 1
assert pysmo_rbf_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_6(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = 0
assert pysmo_rbf_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_wrongtype(self, pysmo_rbf_trainer):
with pytest.raises(ValueError):
pysmo_rbf_trainer.config.regularization = 2
@pytest.mark.unit
def test_create_model_defaults(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = None
pysmo_rbf_trainer.config.regularization = "True"
pysmo_rbf_trainer.config.solution_method = None
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
model = pysmo_rbf_trainer._create_model(data, output_label)
assert model.x_data_columns == ["x1", "x2"]
np.testing.assert_array_equal(model.x_data_unscaled, data.values[:, :-1])
np.testing.assert_array_equal(model.y_data_unscaled[:, 0], data.values[:, -1])
assert model.overwrite == True
assert model.basis_function == "gaussian"
assert model.regularization == True
assert model.solution_method == "algebraic"
# assert model.filename == 'pysmo_Nonerbf_z5.pickle'
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
@pytest.mark.unit
def test_create_model_cubic(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "cubic"
pysmo_rbf_trainer.config.regularization = "False"
pysmo_rbf_trainer.config.solution_method = "pyomo"
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
model = pysmo_rbf_trainer._create_model(data, output_label)
assert model.x_data_columns == ["x1", "x2"]
np.testing.assert_array_equal(model.x_data_unscaled, data.values[:, :-1])
np.testing.assert_array_equal(model.y_data_unscaled[:, 0], data.values[:, -1])
assert model.overwrite == True
assert model.basis_function == "cubic"
assert model.regularization == False
assert model.solution_method == "pyomo"
assert model.filename == "solution.pickle"
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
@pytest.mark.unit
def test_create_model_imq(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "imq"
pysmo_rbf_trainer.config.regularization = True
pysmo_rbf_trainer.config.solution_method = "bfgs"
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
model = pysmo_rbf_trainer._create_model(data, output_label)
assert model.x_data_columns == ["x1", "x2"]
np.testing.assert_array_equal(model.x_data_unscaled, data.values[:, :-1])
np.testing.assert_array_equal(model.y_data_unscaled[:, 0], data.values[:, -1])
assert model.overwrite == True
assert model.basis_function == "imq"
assert model.regularization == True
assert model.solution_method == "bfgs"
# assert model.filename == 'pysmo_Nonerbf_z5.pickle'
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
class TestPysmoKrigingTrainer:
@pytest.fixture
def pysmo_krg_trainer(self):
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
input_labels = ["x1", "x2"]
output_labels = ["z1"]
bnds = {"x1": (0, 5), "x2": (0, 10)}
krg_trainer = PysmoKrigingTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=data,
)
return krg_trainer
@pytest.mark.unit
def test_defaults(self, pysmo_krg_trainer):
# Check all defaults
assert pysmo_krg_trainer.model_type == "kriging"
assert pysmo_krg_trainer.config.numerical_gradients == True
assert pysmo_krg_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_1(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = True
assert pysmo_krg_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_2(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = False
assert pysmo_krg_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_righttype_3(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = "False"
assert pysmo_krg_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_righttype_4(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = "True"
assert pysmo_krg_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_5(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = 1
assert pysmo_krg_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_6(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = 0
assert pysmo_krg_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_wrongtype(self, pysmo_krg_trainer):
with pytest.raises(ValueError):
pysmo_krg_trainer.config.regularization = 2
@pytest.mark.unit
def test_set_numerical_gradients_righttype_1(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = True
assert pysmo_krg_trainer.config.numerical_gradients == True
@pytest.mark.unit
def test_set_numerical_gradients_righttype_2(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = False
assert pysmo_krg_trainer.config.numerical_gradients == False
@pytest.mark.unit
def test_set_numerical_gradients_righttype_3(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = "False"
assert pysmo_krg_trainer.config.numerical_gradients == False
@pytest.mark.unit
def test_set_numerical_gradients_righttype_4(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = "True"
assert pysmo_krg_trainer.config.numerical_gradients == True
@pytest.mark.unit
def test_set_numerical_gradients_righttype_5(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = 1
assert pysmo_krg_trainer.config.numerical_gradients == True
@pytest.mark.unit
def test_set_numerical_gradients_righttype_6(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = 0
assert pysmo_krg_trainer.config.numerical_gradients == False
@pytest.mark.unit
def test_set_numerical_gradients_wrongtype(self, pysmo_krg_trainer):
with pytest.raises(ValueError):
pysmo_krg_trainer.config.numerical_gradients = 2
@pytest.mark.unit
def test_create_model_defaults(self, pysmo_krg_trainer):
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
model = pysmo_krg_trainer._create_model(data, output_label)
assert model.x_data_columns == ["x1", "x2"]
np.testing.assert_array_equal(model.x_data, data.values[:, :-1])
np.testing.assert_array_equal(model.y_data[:, 0], data.values[:, -1])
assert model.overwrite == True
assert model.regularization == True
assert model.num_grads == True
assert model.filename == "solution.pickle"
assert model.num_vars == data.shape[1]
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
@pytest.mark.unit
def test_create_model_no_regularization(self, pysmo_krg_trainer):
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
pysmo_krg_trainer.config.regularization = False
model = pysmo_krg_trainer._create_model(data, output_label)
assert model.x_data_columns == ["x1", "x2"]
np.testing.assert_array_equal(model.x_data, data.values[:, :-1])
np.testing.assert_array_equal(model.y_data[:, 0], data.values[:, -1])
assert model.overwrite == True
assert model.regularization == False
assert model.num_grads == True
assert model.filename == "solution.pickle"
assert model.num_vars == data.shape[1]
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
@pytest.mark.unit
def test_create_model_no_numerical_grads(self, pysmo_krg_trainer):
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
pysmo_krg_trainer.config.numerical_gradients = "False"
model = pysmo_krg_trainer._create_model(data, output_label)
assert model.x_data_columns == ["x1", "x2"]
np.testing.assert_array_equal(model.x_data, data.values[:, :-1])
np.testing.assert_array_equal(model.y_data[:, 0], data.values[:, -1])
assert model.overwrite == True
assert model.regularization == True
assert model.num_grads == False
assert model.filename == "solution.pickle"
assert model.num_vars == data.shape[1]
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
class TestPysmoSurrogate:
@pytest.fixture
def pysmo_surr1(self):
training_data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
} # , 'z2': [6, 8, 10, 12, 14]}
training_data = pd.DataFrame(training_data)
validation_data = {
"x1": [1, 2, 3, 4],
"x2": [5, 6, 7, 8],
"z1": [10, 20, 30, 40],
} # , 'z2': [6, 8, 10, 12]}#{'x1': [2.5], 'x2': [6.5], 'z1': [25], 'z2': [9]}
validation_data = pd.DataFrame(validation_data)
input_labels = ["x1", "x2"]
output_labels = ["z1"]
bnds = {"x1": (0, 5), "x2": (0, 10)}
pysmo_trainer = PysmoPolyTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=training_data,
validation_dataframe=validation_data,
maximum_polynomial_order=1,
multinomials=True,
number_of_crossvalidations=3,
)
a1 = pysmo_trainer.train_surrogate()
pysmo_surr1 = PysmoSurrogate(a1, input_labels, output_labels, bnds)
return pysmo_surr1
@pytest.fixture
def pysmo_surr2_poly(self):
training_data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
"z2": [6, 8, 10, 12, 14],
}
training_data = pd.DataFrame(training_data)
validation_data = {
"x1": [1, 2, 3, 4],
"x2": [5, 6, 7, 8],
"z1": [10, 20, 30, 40],
"z2": [6, 8, 10, 12],
} # {'x1': [2.5], 'x2': [6.5], 'z1': [25], 'z2': [9]}
validation_data = pd.DataFrame(validation_data)
input_labels = ["x1", "x2"]
output_labels = ["z1", "z2"]
bnds = {"x1": (0, 5), "x2": (0, 10)}
pysmo_trainer = PysmoPolyTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=training_data,
validation_dataframe=validation_data,
maximum_polynomial_order=1,
multinomials=True,
number_of_crossvalidations=3,
)
a2_poly = pysmo_trainer.train_surrogate()
pysmo_surr2_poly = PysmoSurrogate(a2_poly, input_labels, output_labels)
return (a2_poly, pysmo_surr2_poly)
@pytest.fixture
def pysmo_surr2_rbf(self):
training_data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
"z2": [6, 8, 10, 12, 14],
}
training_data = pd.DataFrame(training_data)
validation_data = {
"x1": [1, 2, 3, 4],
"x2": [5, 6, 7, 8],
"z1": [10, 20, 30, 40],
"z2": [6, 8, 10, 12],
} # {'x1': [2.5], 'x2': [6.5], 'z1': [25], 'z2': [9]}
validation_data = pd.DataFrame(validation_data)
input_labels = ["x1", "x2"]
output_labels = ["z1", "z2"]
bnds = {"x1": (0, 5), "x2": (0, 10)}
pysmo_trainer2 = PysmoRBFTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=training_data,
validation_dataframe=validation_data,
basis_function="gaussian",
regularization=False,
)
a2_rbf = pysmo_trainer2.train_surrogate()
pysmo_surr2_rbf = PysmoSurrogate(a2_rbf, input_labels, output_labels, bnds)
return (a2_rbf, pysmo_surr2_rbf)
@pytest.fixture
def pysmo_surr2_krg(self):
training_data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
"z2": [6, 8, 10, 12, 14],
}
training_data = pd.DataFrame(training_data)
validation_data = {
"x1": [1, 2, 3, 4],
"x2": [5, 6, 7, 8],
"z1": [10, 20, 30, 40],
"z2": [6, 8, 10, 12],
} # {'x1': [2.5], 'x2': [6.5], 'z1': [25], 'z2': [9]}
validation_data = pd.DataFrame(validation_data)
input_labels = ["x1", "x2"]
output_labels = ["z1", "z2"]
bnds = {"x1": (0, 5), "x2": (0, 10)}
np.random.seed(0)
pysmo_trainer3 = PysmoKrigingTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=training_data,
validation_dataframe=validation_data,
regularization=True,
numerical_gradients=True,
)
a2_krg = pysmo_trainer3.train_surrogate()
pysmo_surr2_krg = PysmoSurrogate(a2_krg, input_labels, output_labels, bnds)
return (a2_krg, pysmo_surr2_krg)
@pytest.fixture
def pysmo_surr3(self):
x1 = [1, 2, 3, 4, 5, 6]
x2 = [5, 6, 7, 8, 9, 10]
z1 = [
3.5 * x1[i] + 2.5 * x2[i] - 1.5 * (sin(x1[i]) + cos(x2[i]))
for i in range(len(x1))
]
z2 = [
3.5 * x1[i] - 2.5 * x2[i] + 0.5 * (sin(x1[i]) + cos(x2[i]))
for i in range(len(x1))
]
x = {"x1": x1, "x2": x2, "z1": z1, "z2": z2}
training_data = pd.DataFrame(x, columns={"x1", "x2", "z1", "z2"})
# training_data = pd.DataFrame(x, columns={'x1', 'x2', 'z1', 'z2'})
validation_data = {
"x1": [1, 2, 3, 4],
"x2": [5, 6, 7, 8],
"z1": [10, 20, 30, 40],
"z2": [6, 8, 10, 12],
} # {'x1': [2.5], 'x2': [6.5], 'z1': [25], 'z2': [9]}
validation_data = pd.DataFrame(validation_data)
input_labels = ["x1", "x2"]
output_labels = ["z1", "z2"]
bnds = {"x1": (0, 10), "x2": (0, 10)}
pysmo_trainer = PysmoPolyTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=training_data,
validation_dataframe=validation_data,
maximum_polynomial_order=1,
multinomials=False,
extra_features=["sin(x1)", "cos(x2)"],
number_of_crossvalidations=10,
solution_method="mle",
)
a3 = pysmo_trainer.train_surrogate()
pysmo_surr3 = PysmoSurrogate(a3, input_labels, output_labels)
return a3, pysmo_surr3
@pytest.fixture
def pysmo_surr4(self):
training_data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
"z2": [6, 8, 10, 12, 14],
}
training_data = pd.DataFrame(training_data)
validation_data = {
"x1": [1, 2, 3, 4],
"x2": [5, 6, 7, 8],
"z1": [10, 20, 30, 40],
"z2": [6, 8, 10, 12],
} # {'x1': [2.5], 'x2': [6.5], 'z1': [25], 'z2': [9]}
validation_data = pd.DataFrame(validation_data)
input_labels = ["x1", "x2"]
output_labels = ["z1", "z2"]
bnds = {"x1": (0, 5), "x2": (0, 10)}
pysmo_trainer = PysmoPolyTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=training_data,
validation_dataframe=validation_data,
maximum_polynomial_order=1,
multinomials=False,
number_of_crossvalidations=3,
extra_features=["x1 / x2"],
)
a2 = pysmo_trainer.train_surrogate()
pysmo_surr2 = PysmoSurrogate(a2, input_labels, output_labels)
return pysmo_surr2
@pytest.fixture
def pysmo_surr5_rbf(self):
training_data = {"x1": [1, 2, 3, 4, 5], "z1": [10, 20, 30, 40, 50]}
training_data = pd.DataFrame(training_data)
validation_data = {"x1": [1, 2, 3, 4], "z1": [10, 20, 30, 40]}
validation_data = pd.DataFrame(validation_data)
input_labels = ["x1"]
output_labels = ["z1"]
bnds = {"x1": (0, 5)}
pysmo_trainer = PysmoRBFTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=training_data,
validation_dataframe=validation_data,
basis_function="cubic",
)
a5_rbf = pysmo_trainer.train_surrogate()
pysmo_surr5_rbf = PysmoSurrogate(a5_rbf, input_labels, output_labels, bnds)
return a5_rbf, pysmo_surr5_rbf
@pytest.fixture
def pysmo_surr5_krg(self):
training_data = {"x1": [1, 2, 3, 4, 5], "z1": [10, 20, 30, 40, 50]}
training_data = pd.DataFrame(training_data)
validation_data = {"x1": [1, 2, 3, 4], "z1": [10, 20, 30, 40]}
validation_data = pd.DataFrame(validation_data)
input_labels = ["x1"]
output_labels = ["z1"]
bnds = {"x1": (0, 5)}
pysmo_trainer2 = PysmoKrigingTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=training_data,
validation_dataframe=validation_data,
)
a5_krg = pysmo_trainer2.train_surrogate()
pysmo_surr5_krg = PysmoSurrogate(a5_krg, input_labels, output_labels, bnds)
return a5_krg, pysmo_surr5_krg
@pytest.fixture
def pysmo_surr6(self):
x1 = [1, 2, 3, 4, 5, 6]
x2 = [5, 6, 7, 8, 9, 10]
z1 = [
3.5 * x1[i] + 2.5 * x2[i] - 1.5 * (exp(x1[i] / x2[i]))
for i in range(len(x1))
]
z2 = [
3.5 * x1[i] - 2.5 * x2[i] + 0.5 * (exp(x1[i] / x2[i]))
for i in range(len(x1))
]
x = {"x1": x1, "x2": x2, "z1": z1, "z2": z2}
training_data = pd.DataFrame(x, columns={"x1", "x2", "z1", "z2"})
# training_data = pd.DataFrame(x, columns={'x1', 'x2', 'z1', 'z2'})
validation_data = {
"x1": [1, 2, 3, 4],
"x2": [5, 6, 7, 8],
"z1": [10, 20, 30, 40],
"z2": [6, 8, 10, 12],
} # {'x1': [2.5], 'x2': [6.5], 'z1': [25], 'z2': [9]}
validation_data = | pd.DataFrame(validation_data) | pandas.DataFrame |
import unittest
import pandas as pd
from evalcat.result_list import ResultList
from evalcat.fields.base import Field
"""Mock functions for testing ResultList."""
# Defines a document (search result item) in our collection.
class Result(dict):
def __init__(self, value):
super().__init__(value=value)
# Defines a mock metric 1: the sum of values from a list of results.
def metric_sum(results):
return sum(item['value'] for item in results if item['value'] is not None)
# Defines a mock metric 2: the product of values from a list of results.
def metric_product(results):
prod = 1
for item in results:
if item['value']:
prod *= item['value']
return prod
# Mock Field class using the metrics `metric_sum` and `metric_product`.
class MockField(Field):
def __init__(self):
super().__init__('mock')
def at_k(self, res, k=10):
return {metric.__name__: metric(res[:k]) for metric in [metric_sum, metric_product]}
# Input for ResultList is in this format.
MOCK_RESULTS = {
'system A': {
'query 1': [Result(5), Result(2), Result(1)],
'query 2': [Result(1), Result(3)],
'query 3': [Result(5), Result(2)],
}, 'system B': {
'query 1': [Result(8), Result(3)],
'query 2': [Result(4), Result(1)],
'query 3': [Result(4), Result(1), Result(3), Result(None)],
}
}
# Mock result (Output DataFrame will be made from this).
TEST_RESULTS = {
'system A': {
'query 1': {'metric_sum': 8, 'metric_product': 10},
'query 2': {'metric_sum': 4, 'metric_product': 3},
'query 3': {'metric_sum': 7, 'metric_product': 10}
},
'system B': {
'query 1': {'metric_sum': 11, 'metric_product': 24},
'query 2': {'metric_sum': 5, 'metric_product': 4},
'query 3': {'metric_sum': 8, 'metric_product': 12}
}
}
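# Sanity sketch (illustrative only, not collected by the test runner): the
# numbers in TEST_RESULTS follow from applying the mock metrics to MOCK_RESULTS,
# e.g. for 'system A' / 'query 1' the result values are 5, 2, 1, giving
# metric_sum = 5 + 2 + 1 = 8 and metric_product = 5 * 2 * 1 = 10.
def _recompute_expected():
    return {
        system: {
            query: {
                'metric_sum': metric_sum(results),
                'metric_product': metric_product(results),
            }
            for query, results in queries.items()
        }
        for system, queries in MOCK_RESULTS.items()
    }  # equals TEST_RESULTS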
"""Test Classes"""
class TestResultList(unittest.TestCase):
def setUp(self):
self.result_list = ResultList(MOCK_RESULTS, [MockField()])
def test_get_field_from_summary(self):
pd.testing.assert_frame_equal(
self.result_list.summary['mock'],
self.result_list._get_field_from_summary('mock')
)
with self.assertRaises(ValueError):
self.result_list._get_field_from_summary('wrong_field')
with self.assertRaises(TypeError):
self.result_list._get_field_from_summary(12)
def test_get_query_metric_matrix(self):
# Testing good queries.
for system in ['system A', 'system B']:
test_result = pd.DataFrame({
metric: [
val[metric] for val in TEST_RESULTS[system].values()
] for metric in ['metric_sum', 'metric_product']
}, index=TEST_RESULTS[system].keys())
"""Example output for system A
>>> test_result
| |metric_sum|metric_product|
|-------|----------|--------------|
|query 1| 8| 10|
|query 2| 4| 3|
|query 3| 7| 10|
"""
result_df = self.result_list.get_query_metric_df(field_name='mock', system=system)
| pd.testing.assert_frame_equal(result_df, test_result) | pandas.testing.assert_frame_equal |
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To see moto-server logs
# pytest -s -p no:logging tests/test_aio_s3fs.py
import pytest
@pytest.mark.asyncio
async def test_pandas_s3_io(
aio_s3_bucket, aio_s3fs
):
import numpy as np
import pandas as pd
s3_file = f"s3://{aio_s3_bucket}/data.csv"
print(s3_file)
data = {"1": np.random.rand(5)}
df = | pd.DataFrame(data=data) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def redshift_url() -> str:
conn = os.environ["REDSHIFT_URL"]
return conn
@pytest.mark.skipif(not os.environ.get("REDSHIFT_URL"), reason="Do not test Redshift unless `REDSHIFT_URL` is set")
def test_redshift_without_partition(redshift_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(redshift_url, query, protocol="cursor")
# result from redshift might have different order each time
df.sort_values(by="test_int", inplace=True, ignore_index=True)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": | pd.Series([0, 1, 2, 3, 4, 1314], dtype="Int64") | pandas.Series |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
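# For example (illustrative): if either operand involves a DataFrame the
# comparison result is expected to be a DataFrame; otherwise a Series outranks
# an Index, and any other box is returned unchanged.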
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
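# comparisons on an Index return a plain ndarray, so the expected result
# box (xbox) can differ from the input box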
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
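# _NaT below is presumably one step above the NaT sentinel (the most negative
# int64), so timedelta arithmetic with it sits right at the int64 overflow edge.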
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = | timedelta_range('1 day', periods=3) | pandas.timedelta_range |
from datetime import datetime, date, time, timedelta
from collections import Counter
from flaskblog import api
import sys
import tweepy
import numpy as np
import pandas as pd
import re
from textblob import TextBlob
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
def get_tweets(hashtag):
all_tweets = []
for tweet in tweepy.Cursor(api.search, q=hashtag, lang='en', tweet_mode = 'extended').items(100):
all_tweets.append(tweet.full_text)
return all_tweets
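# A possible downstream use of the TextBlob import (illustrative sketch only;
# the hashtag is made up and `api` must be backed by valid Twitter credentials):
#   tweets = get_tweets('#python')
#   polarities = [TextBlob(t).sentiment.polarity for t in tweets]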
def tweet_to_data_frame(tweets):
df = | pd.DataFrame(data=[tweet for tweet in tweets], columns=['Tweets']) | pandas.DataFrame |
import os
import gc
import re
import json
import random
import numpy as np
import pandas as pd
import scipy.io as sio
from tqdm import tqdm
import matplotlib.pyplot as plt
from daisy.utils.data import incorporate_in_ml100k
from scipy.sparse import csr_matrix
from collections import defaultdict
from IPython import embed
def convert_unique_idx(df, col):
column_dict = {x: i for i, x in enumerate(df[col].unique())}
df[col] = df[col].apply(column_dict.get)
assert df[col].min() == 0
assert df[col].max() == len(column_dict) - 1
return df
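# Illustrative example with a toy frame (not part of any dataset): values are
# remapped in order of first appearance, so repeats share one integer index.
#   toy = pd.DataFrame({'user': ['u9', 'u3', 'u9'], 'item': ['i7', 'i7', 'i2']})
#   convert_unique_idx(toy, 'user')['user'].tolist()   # -> [0, 1, 0]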
def cut_down_data_half(df):
cut_df = pd.DataFrame([])
for u in np.unique(df.user):
aux = df[df['user'] == u].copy()
cut_df = cut_df.append(aux.sample(int(len(aux) / 2)))
return cut_df
def filter_users_and_items(df, num_users=None, freq_items=None, top_items=None, keys=['user', 'item']):
'''
Reduce the dataframe to (at most) `num_users` users and filter the items by interaction frequency and/or keep only the `top_items` most frequent items
'''
if num_users is not None:
# df = df[df['user_id'].isin(np.unique(df.user_id)[:num_users])]
df = df[df[keys[0]].isin(np.unique(df[keys[0]])[:num_users])]
# Get top5k books
if top_items is not None:
top5k_books = df[keys[1]].value_counts()[:top_items].index
df = df[df[keys[1]].isin(top5k_books)]
if freq_items is not None:
frequent_items = df['item'].value_counts()[df['item'].value_counts() > freq_items].index
df = df[df[keys[1]].isin(frequent_items)]
return df
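# Illustrative call (thresholds are made up): keep the first 1000 user ids as
# sorted by np.unique, then drop items with 5 or fewer interactions; passing
# `top_items` instead would keep only the most frequently rated items.
#   df = filter_users_and_items(df, num_users=1000, freq_items=5)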
def run_statistics(df, src):
path = f'histograms/{src}'
bins = 200
os.makedirs(path, exist_ok=True)
f = open(os.path.join(path, "information.txt"), "w+")
f.write("Information:\n")
f.write("==========================\n")
f.write(f"Interactions: {len(df)}\n")
f.write(f"#users = {df['user'].nunique()}\n")
f.write(f"#items = {df['item'].nunique()}\n")
f.close()
for key in ['user', 'item']:
# OPTION A: HISTOGRAM
a = pd.DataFrame(df.groupby([key])[key].count())
a.columns = ['value_counts']
a.reset_index(level=[0], inplace=True)
dims = (15, 5)
fig, ax = plt.subplots(figsize=dims)
a["value_counts"].hist(bins=200)
# fig.savefig('hist.jpg')
fig.savefig(os.path.join(path, f'{src}_histogram_{key}_bins={bins}.png'))
fig.clf()
# OPTION B: BAR PLOT
# a = pd.DataFrame(df_year.groupby(['user'])['user'].count())
# a.columns = ['value_counts']
# a.reset_index(level=[0], inplace=True)
# dims = (15, 5)
# fig, ax = plt.subplots(figsize=dims)
# sns.set_style("darkgrid")
# sns.barplot(ax=ax, x="user", y="value_counts", data=a, palette="Blues_d")
# ax.set(xlabel="User", ylabel="Value Counts")
# plt.xticks(rotation=45)
# plt.show()
# fig.savefig('data.jpg')
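# Typical call (assuming `df` carries 'user' and 'item' columns):
#   run_statistics(df, 'ml-100k')
# which writes histograms/ml-100k/information.txt plus one value-count
# histogram per key ('user' and 'item') into the same folder.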
def load_rate(src='ml-100k', prepro='origin', binary=True, pos_threshold=None, level='ui', context=False,
gce_flag=False, cut_down_data=False, side_info=False, context_type='', context_as_userfeat=False,
flag_run_statistics=False, remove_top_users=0, remove_on='item'):
"""
Load and pre-process the raw data of the requested dataset
Parameters
----------
src : str, the name of the dataset
prepro : str, how to pre-process the raw data; expected values are 'origin', f'{N}core' or f'{N}filter', where N is an integer
binary : boolean, whether to binarize ratings into CTR-style labels (True) or keep them as regression targets (False)
pos_threshold : float, if not None, treat ratings larger than this threshold as positive samples
level : str, the level at which the f'{N}core' or f'{N}filter' operation is applied (only used when prepro contains 'core' or 'filter')
Returns
-------
df : pd.DataFrame, rating information with columns: user, item, rating, (options: timestamp)
user_num : int, the number of users
item_num : int, the number of items
"""
df = pd.DataFrame()
# import mat73
# a = mat73.loadmat('data/gen-disease/genes_phenes.mat')
# which dataset will use
if src == 'ml-100k':
df = pd.read_csv(f'./data/{src}/u.data', sep='\t', header=None,
names=['user', 'item', 'rating', 'timestamp'], engine='python')
if cut_down_data:
df = cut_down_data_half(df) # from 100k to 49.760 interactions
elif src == 'drugs':
union = False
if union == True:
df = pd.read_csv(f'./data/{src}/train_data_contextUNION_sideeffect.csv', engine='python', index_col=0)
df.drop(columns=['context'], inplace=True)
df.rename(columns={'drug': 'user', 'disease': 'item',
'context_union': 'context',
'proteins': 'item-feat', 'side_effect': 'user-feat'}, inplace=True)
else:
df = pd.read_csv(f'./data/{src}/train_data_allcontext_sideeffect.csv', engine='python', index_col=0)
df.rename(columns={'drug': 'user', 'disease': 'item',
# 'proteins_drug': 'user-feat',
'proteins': 'item-feat', 'side_effect': 'user-feat'}, inplace=True)
if not context:
df = df[['user', 'item']]
else:
if context_as_userfeat:
df = df[['user', 'item', 'user-feat', 'item-feat']]
else:
df = df[['user', 'item', 'context', 'user-feat']]
df['array_context_flag'] = True
df['timestamp'] = 1
df['rating'] = 1
elif src == 'ml-1m':
df = pd.read_csv(f'./data/{src}/ratings.dat', sep='::', header=None,
names=['user', 'item', 'rating', 'timestamp'], engine='python')
# only consider rating >=4 for data density
# df = df.query('rating >= 4').reset_index(drop=True).copy()
elif src == 'ml-10m':
df = pd.read_csv(f'./data/{src}/ratings.dat', sep='::', header=None,
names=['user', 'item', 'rating', 'timestamp'], engine='python')
# df = df.query('rating >= 4').reset_index(drop=True).copy()
elif src == 'ml-20m':
df = pd.read_csv(f'./data/{src}/ratings.csv')
df.rename(columns={'userId': 'user', 'movieId': 'item'}, inplace=True)
# df = df.query('rating >= 4').reset_index(drop=True)
elif src == 'books':
if not os.path.exists(f'./data/{src}/preprocessed_books_complete_timestamp.csv'):
df = pd.read_csv(f'./data/{src}/preprocessed_books_complete.csv', sep=',', engine='python')
df.rename(columns={'user_id': 'user', 'book_id': 'item', 'date_added': 'timestamp'}, inplace=True)
df = convert_unique_idx(df, 'user')
df = convert_unique_idx(df, 'item')
df['rating'] = 1.0
# if type(df['timestamp'][0]) == 'str':
df['date'] = pd.to_datetime(df['timestamp'])
df['timestamp'] = pd.to_datetime(df['date'], utc=True)
df['timestamp'] = df.timestamp.astype('int64') // 10 ** 9
df.to_csv(f'./data/{src}/preprocessed_books_complete_timestamp.csv', sep=',', index=False)
else:
df = | pd.read_csv(f'./data/{src}/preprocessed_books_complete_timestamp.csv', sep=',', engine='python') | pandas.read_csv |
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with pytest.raises(TypeError):
dti + dti
with pytest.raises(TypeError):
dti_tz + dti_tz
with pytest.raises(TypeError):
dti_tz + dti
with pytest.raises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
| tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from pandas.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, IntervalIndex, Series, date_range)
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert not len(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_any_dtype(self.dtype)
assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string('datetime64[ns, notatz]')
with pytest.raises(TypeError,
match="^Could not construct DatetimeTZDtype$"):
DatetimeTZDtype.construct_from_string(['datetime64[ns, notatz]'])
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s)
assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
assert is_datetime64tz_dtype(s1)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
assert is_datetime64tz_dtype(s2)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s2)
assert s1.dtype == s2.dtype
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern'])
@pytest.mark.parametrize('constructor', ['M8', 'datetime64'])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = '{con}[ns, {tz}]'.format(con=constructor, tz=tz)
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype('ns', tz)
assert result == expected
def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
class TestPeriodDtype(Base):
def create(self):
return PeriodDtype('D')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = PeriodDtype('D')
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day()
assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day(3)
assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
assert PeriodDtype('period[3D]') is PeriodDtype('period[3D]')
assert PeriodDtype('period[1S1U]') == PeriodDtype('period[1000001U]')
assert PeriodDtype('period[1S1U]') is PeriodDtype('period[1000001U]')
def test_compat(self):
assert not is_datetime64_ns_dtype(self.dtype)
assert not is_datetime64_ns_dtype('period[D]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('period[D]')
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
assert is_dtype_equal(self.dtype, result)
result = PeriodDtype.construct_from_string('period[D]')
assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('period[foo]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo[D]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
assert PeriodDtype.is_dtype(self.dtype)
assert PeriodDtype.is_dtype('period[D]')
assert PeriodDtype.is_dtype('period[3D]')
assert PeriodDtype.is_dtype(PeriodDtype('3D'))
assert PeriodDtype.is_dtype('period[U]')
assert PeriodDtype.is_dtype('period[S]')
assert PeriodDtype.is_dtype(PeriodDtype('U'))
assert PeriodDtype.is_dtype(PeriodDtype('S'))
assert not PeriodDtype.is_dtype('D')
assert not PeriodDtype.is_dtype('3D')
assert not PeriodDtype.is_dtype('U')
assert not PeriodDtype.is_dtype('S')
assert not PeriodDtype.is_dtype('foo')
assert not PeriodDtype.is_dtype(np.object_)
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'period[D]')
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
assert not is_dtype_equal(self.dtype, 'D')
assert not is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
def test_basic(self):
assert is_period_dtype(self.dtype)
pidx = pd.period_range('2013-01-01 09:00', periods=5, freq='H')
assert is_period_dtype(pidx.dtype)
assert is_period_dtype(pidx)
with tm.assert_produces_warning(FutureWarning):
assert is_period(pidx)
s = Series(pidx, name='A')
assert is_period_dtype(s.dtype)
assert is_period_dtype(s)
with tm.assert_produces_warning(FutureWarning):
assert is_period(s)
assert not is_period_dtype(np.dtype('float64'))
assert not is_period_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert not is_period(np.dtype('float64'))
with tm.assert_produces_warning(FutureWarning):
assert not is_period(1.0)
def test_empty(self):
dt = PeriodDtype()
with pytest.raises(AttributeError):
str(dt)
def test_not_string(self):
# though PeriodDtype has object kind, it cannot be string
assert not is_string_dtype(PeriodDtype('D'))
class TestIntervalDtype(Base):
def create(self):
return IntervalDtype('int64')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = IntervalDtype('int64')
dtype3 = IntervalDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype3
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype1 = IntervalDtype('interval')
dtype2 = IntervalDtype(dtype1)
dtype3 = IntervalDtype('interval')
assert dtype2 == dtype1
assert dtype2 == dtype2
assert dtype2 == dtype3
assert dtype2 is dtype1
assert dtype2 is dtype2
assert dtype2 is dtype3
assert hash(dtype2) == hash(dtype1)
assert hash(dtype2) == hash(dtype2)
assert hash(dtype2) == hash(dtype3)
@pytest.mark.parametrize('subtype', [
'interval[int64]', 'Interval[int64]', 'int64', np.dtype('int64')])
def test_construction(self, subtype):
i = IntervalDtype(subtype)
assert i.subtype == np.dtype('int64')
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_construction_generic(self, subtype):
# generic
i = IntervalDtype(subtype)
assert i.subtype is None
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [
CategoricalDtype(list('abc'), False),
CategoricalDtype(list('wxyz'), True),
object, str, '<U10', 'interval[category]', 'interval[object]'])
def test_construction_not_supported(self, subtype):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalDtype')
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
@pytest.mark.parametrize('subtype', ['xx', 'IntervalA', 'Interval[foo]'])
def test_construction_errors(self, subtype):
msg = 'could not construct IntervalDtype'
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
assert is_dtype_equal(self.dtype, result)
result = IntervalDtype.construct_from_string('interval[int64]')
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
msg = 'a string needs to be passed, got type'
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize('string', [
'foo', 'foo[int64]', 'IntervalA'])
def test_construction_from_string_error_subtype(self, string):
# this is an invalid subtype
msg = ("Incorrectly formatted string passed to constructor. "
r"Valid formats include Interval or Interval\[dtype\] "
"where dtype is numeric, datetime, or timedelta")
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
a = IntervalDtype('interval[int64]')
b = IntervalDtype('interval[int64]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_is_dtype(self):
assert IntervalDtype.is_dtype(self.dtype)
assert IntervalDtype.is_dtype('interval')
assert IntervalDtype.is_dtype(IntervalDtype('float64'))
assert IntervalDtype.is_dtype(IntervalDtype('int64'))
assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
assert not | IntervalDtype.is_dtype('D') | pandas.core.dtypes.dtypes.IntervalDtype.is_dtype |
__copyright__ = """
Copyright (C) 2020 <NAME>
Copyright (C) 2020 <NAME>
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
# pylint: disable=no-member
class SimulationResult:
def __init__(self, time, y):
self.t = time
self.y = y
from pydemic.distributions import GammaDistribution
default_serial = GammaDistribution(mean=4, std=3.25)
class NonMarkovianSEIRSimulationBase:
"""
Main driver for non-Markovian simulations, used as a base class for
SEIR++ variants.
.. automethod:: __init__
.. automethod:: get_y0
.. automethod:: __call__
.. automethod:: get_model_data
"""
increment_keys = ('infected', 'dead')
def seasonal_forcing(self, t):
phase = 2 * np.pi * (t - self.peak_day) / 365
return (1 + self.seasonal_forcing_amp * np.cos(phase))
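# With the defaults used below (seasonal_forcing_amp=.2, peak_day=15), this
# factor is 1 + 0.2*cos(0) = 1.2 on day 15 and 1 + 0.2*cos(pi) = 0.8 roughly
# half a year later, i.e. transmission is modulated by +/-20% over the year.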
def __init__(self, mitigation=None, *, age_distribution=None,
r0=3.2, serial_dist=default_serial,
seasonal_forcing_amp=.2, peak_day=15):
"""
The following keyword-only arguments are recognized:
:arg mitigation: A function of time specifying a multiplicative factor.
Defaults to ``lambda t: 1``.
:arg r0: The basic reproduction number.
:arg serial_dist: The serial interval distribution, i.e.,
an instance of (a subclass of)
:class:`~pydemic.distributions.DistributionBase`).
:arg seasonal_forcing_amp: The amplitude (i.e., maximum fractional change)
in the force of infection due to seasonal effects.
:arg peak_day: The day of the year at which seasonal forcing is greatest.
"""
if mitigation is not None:
self.mitigation = mitigation
else:
self.mitigation = lambda t: 1
self.serial_dist = serial_dist
self.seasonal_forcing_amp = seasonal_forcing_amp
self.peak_day = peak_day
self.r0 = r0
def step(self, state, count, dt):
Rt = (self.r0
* self.mitigation(state.t[count])
* self.seasonal_forcing(state.t[count]))
j_m = np.dot(state.y['infected'][..., count-1::-1],
self.serial_pdf[:count])
j_i = j_m.sum()
S_i = state.y['susceptible'][..., count-1]
new_infected_i = dt * Rt * S_i * j_i / self.total_population
state.y['infected'][..., count] = new_infected_i
state.y['susceptible'][..., count] = (
state.y['susceptible'][..., count-1] - new_infected_i
)
def get_y0(self, total_population, initial_cases, age_distribution):
"""
Initializes a population with a number ``initial_cases`` of initial
infectious individuals distributed in proportion to ``age_distribution``.
:arg total_population: The total size of the population.
:arg initial_cases: The total number of initial cases.
:arg age_distribution: A :class:`numpy.ndarray` specifying the relative
fraction of the population in various age groups.
:returns: A :class:`dict` containing the initial conditions for the
``'infected'`` and ``'susceptible'`` compartments.
"""
# FIXME: shouldn't be set here
self.total_population = total_population
self.population = total_population * np.array(age_distribution)
n_demographics = len(age_distribution)
y0 = {}
for key in ('susceptible', 'infected'):
y0[key] = np.zeros((n_demographics,))
y0['infected'][...] = initial_cases * np.array(age_distribution)
y0['susceptible'][...] = self.population - y0['infected']
return y0
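# Worked example of the split above: total_population=1000, initial_cases=10
# and age_distribution=[0.3, 0.7] give infected=[3., 7.], population
# [300., 700.] and susceptible=[297., 693.].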
def __call__(self, tspan, y0, dt=.05):
"""
:arg tspan: A :class:`tuple` specifying the initial and final times.
:arg y0: A :class:`dict` with the initial values
(as :class:`numpy.ndarray`'s) for the
``'infected'`` and ``'susceptible'`` compartments, e.g., as returned
by :meth:`get_y0`.
:arg dt: The timestep.
:returns: A :class:`SimulationResult` with attributes ``t``, the array of
times of evaluation, and ``y``, a :class:`dict` of results where time
proceeds along the first axis.
"""
start_time, end_time = tspan
times = np.arange(start_time, end_time + dt, dt)
n_steps = times.shape[0]
pdf = self.serial_dist.pdf(times[1:] - start_time, method='diff')
self.serial_pdf = pdf / dt
y0_all_t = {}
for key in y0:
y0_all_t[key] = np.zeros(y0[key].shape + (n_steps,))
y0_all_t[key][..., 0] = y0[key]
influxes = SimulationResult(times, y0_all_t)
for count in range(1, n_steps):
self.step(influxes, count, dt)
for key, val in influxes.y.items():
influxes.y[key] = val.T
return influxes
@classmethod
def get_model_data(cls, t, **kwargs):
"""
A wrapper to :meth:`__init__` and :meth:`__call__` for initializing and
running a simulation from keyword arguments only (i.e., as used by
:class:`~pydemic.LikelihoodEstimator`.)
:arg t: A :class:`pandas.DatetimeIndex` (or :class:`numpy.ndarray` of
times in days since 2020/1/1) of times at which to evaluate the
solution.
The following keyword arguments are required:
:arg start_day: The day (relative to 2020/1/1) at which to begin the
simulation---i.e., the day corresponding to the initial condition
generated by :meth:`get_y0`.
:arg total_population: The total size of the population.
:arg initial_cases: The total number of initial cases.
:arg age_distribution: A :class:`numpy.ndarray` specifying the relative
fraction of the population in various age groups.
In addition, a :class:`~pydemic.MitigationModel` is created from
passed keyword arguments via
:meth:`~pydemic.MitigationModel.init_from_kwargs`.
The following optional keyword arguments are also recognized:
:arg min_mitigation_spacing: The minimum number of days of separation
between mitigation events.
Defaults to ``5``.
All remaining keyword arguments are passed to :meth:`__init__`.
:raises InvalidParametersError: if ``t`` specifies any days of evaluation
which are not at least one day after ``start_day``.
:raises InvalidParametersError: if mitigation events are not ordered and
separated by ``min_mitigation_spacing``.
:returns: A :class:`pandas.DataFrame` of simulation results.
"""
if isinstance(t, pd.DatetimeIndex):
t_eval = (t - | pd.to_datetime('2020-01-01') | pandas.to_datetime |
from typing import Union, Dict, List, Optional
from pathlib import Path
from datetime import date
import numpy as np
from tqdm import tqdm
import pandas as pd
from .utils import get_basin_list
from .datautils import load_forcing, load_discharge
def split_basins(
camels_root: Union[str, Path],
basin_list: Union[str, Path],
split: List[float],
store_folder: Union[str, Path],
timeseries: List[str],
dataset: List[str],
seed: int,
normalize: bool = True,
):
if isinstance(basin_list, str):
basin_list = Path(basin_list)
elif not isinstance(basin_list, Path):
raise TypeError(f"basin_list must be Path or str, not {type(basin_list)}")
if isinstance(store_folder, str):
store_folder = Path(store_folder)
elif not isinstance(store_folder, Path):
        raise TypeError(f"store_folder must be Path or str, not {type(store_folder)}")
if sum(split) > 1:
raise ValueError(f"sum of splits must be 1, not {sum(split)}")
if len(split) not in (2, 3):
raise ValueError(f"length of split must be 2 or 3, not {len(split)}")
np.random.seed(seed)
store_folder = store_folder / f"split_seed_{seed}"
store_folder.mkdir(parents=True, exist_ok=True)
basins = np.loadtxt(basin_list, dtype="str")
np.random.shuffle(basins)
if len(split) == 2:
basins_test = basins[: int(len(basins) * split[1])]
basins_train = basins[int(len(basins) * split[1]) :]
else:
basins_test = basins[: int(len(basins) * split[2])]
basins_validation = basins[
int(len(basins) * split[2])
: int(len(basins) * split[1]) + int(len(basins) * split[2])
]
basins_train = basins[
int(len(basins) * split[1]) + int(len(basins) * split[2]) :
]
np.savetxt(store_folder / "basins_test.txt", basins_test, fmt="%s")
np.savetxt(store_folder / "basins_train.txt", basins_train, fmt="%s")
if len(split) == 3:
np.savetxt(store_folder / "basins_validation.txt", basins_validation, fmt="%s")
if normalize:
create_normalization_file(
camels_root,
store_folder / "basins_train.txt",
dataset=dataset,
timeseries=timeseries,
)
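# Example call (illustrative: the paths, forcing column names and dataset name below are
# assumptions and must match your local CAMELS layout):
#   split_basins(
#       camels_root="data/CAMELS_US",
#       basin_list="data/basin_list.txt",
#       split=[0.7, 0.15, 0.15],              # train / validation / test fractions
#       store_folder="runs/splits",
#       timeseries=["prcp(mm/day)", "tmax(C)"],
#       dataset=["daymet"],
#       seed=42,
#   )
# This writes basins_train.txt, basins_validation.txt and basins_test.txt under
# runs/splits/split_seed_42/ and, with normalize=True, a normalization file computed
# from the training basins only.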
def cross_validation_split(
camels_root: Union[str, Path],
basin_list: Union[str, Path],
k: int,
test_split: float,
store_folder: Union[str, Path],
seed: int,
dataset: List[str],
timeseries: List[str],
normalize: bool = True,
):
if isinstance(basin_list, str):
basin_list = Path(basin_list)
elif not isinstance(basin_list, Path):
raise TypeError(f"basin_list must be Path or str, not {type(basin_list)}")
if isinstance(store_folder, str):
store_folder = Path(store_folder)
elif not isinstance(store_folder, Path):
        raise TypeError(f"store_folder must be Path or str, not {type(store_folder)}")
store_folder = store_folder / f"cross_validation_seed_{seed}"
store_folder.mkdir(parents=True, exist_ok=True)
np.random.seed(seed)
basins = np.loadtxt(basin_list, dtype="str")
np.random.shuffle(basins)
basins_test = basins[: int(len(basins) * test_split)]
basins = basins[int(len(basins) * test_split) :]
basins_split = np.array_split(basins, k)
np.savetxt(store_folder / "basins_test.txt", basins_test, fmt="%s")
for i, basins_val in enumerate(basins_split):
split_folder = store_folder / f"{i}"
split_folder.mkdir(parents=True, exist_ok=True)
basins_train = np.delete(basins_split, i)
basins_train_list = []
for sub_split in basins_train:
basins_train_list.extend(list(sub_split))
basins_train = np.array(basins_train_list, dtype=object)
del basins_train_list
np.savetxt(split_folder / "basins_val.txt", basins_val, fmt="%s")
np.savetxt(split_folder / "basins_train.txt", basins_train, fmt="%s")
create_normalization_file(
camels_root,
split_folder / "basins_train.txt",
dataset=dataset,
timeseries=timeseries,
)
def combine_cv_datasets(
cv_folder_1: Path,
cv_folder_2: Path,
store_folder: Path,
seed: int,
k: int = 5,
normalize: bool = True,
timeseries: Optional[List[str]] = None,
dataset: Optional[List[str]] = None,
camels_root: Optional[Union[Path, str, List[Union[Path, str]]]] = None,
):
store_folder = store_folder / f"cross_validation_seed_{seed}"
store_folder.mkdir(exist_ok=True, parents=True)
cv1 = cv_folder_1 / f"cross_validation_seed_{seed}"
cv2 = cv_folder_2 / f"cross_validation_seed_{seed}"
test1 = np.loadtxt(cv1 / "basins_test.txt", dtype="str")
test2 = np.loadtxt(cv2 / "basins_test.txt", dtype="str")
test = np.append(test1, test2)
np.savetxt(store_folder / "basins_test.txt", test, fmt="%s")
for i in range(k):
train1, train2 = (
np.loadtxt(cv1 / f"{i}" / "basins_train.txt", dtype="str"),
np.loadtxt(cv2 / f"{i}" / "basins_train.txt", dtype="str"),
)
train = np.append(train1, train2)
cv_folder = store_folder / f"{i}"
cv_folder.mkdir(exist_ok=True)
np.savetxt(cv_folder / "basins_train.txt", train, fmt="%s")
if normalize:
create_normalization_file(
camels_root=camels_root,
dataset=dataset,
train_basin_list=cv_folder / "basins_train.txt",
timeseries=timeseries,
)
val1, val2 = (
np.loadtxt(cv1 / f"{i}" / "basins_val.txt", dtype="str"),
np.loadtxt(cv2 / f"{i}" / "basins_val.txt", dtype="str"),
)
val = np.append(val1, val2)
np.savetxt(cv_folder / "basins_val.txt", val, fmt="%s")
def create_normalization_file(
camels_root: Union[str, Path],
train_basin_list: Path,
dataset: List[str],
timeseries: List[str],
):
basin_list = get_basin_list(train_basin_list)
mean = np.zeros(len(timeseries)).reshape(1, -1)
mean_squared = np.zeros_like(mean)
length = 0
for i, basin in enumerate(tqdm(basin_list)):
forcing, _ = load_forcing(camels_root, basin, dataset=dataset)
forcing = forcing[timeseries]
discharge = load_discharge(camels_root, basin, _, dataset=dataset)
if i == 0:
mean = pd.DataFrame(mean, columns=forcing.columns)
mean["discharge"] = np.array([0])
mean_squared = | pd.DataFrame(mean_squared, columns=forcing.columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
import datetime
from utils import *
import matplotlib.pyplot as plt
DATA_PATH = "./data/EVconsumption/"
d1 = | pd.read_csv(DATA_PATH + "data_0_joined_data.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
from vivarium import InteractiveContext
from vivarium.framework.randomness import choice
from vivarium.framework.state_machine import Machine, State, Transition
def _population_fixture(column, initial_value):
class PopFixture:
@property
def name(self):
return f"test_pop_fixture_{column}_{initial_value}"
def setup(self, builder):
self.population_view = builder.population.get_view([column])
builder.population.initializes_simulants(self.inner, creates_columns=[column])
def inner(self, pop_data):
self.population_view.update(pd.Series(initial_value, index=pop_data.index))
return PopFixture()
def _even_population_fixture(column, values):
class pop_fixture:
@property
def name(self):
return "test_pop_fixture"
def setup(self, builder):
self.population_view = builder.population.get_view([column])
builder.population.initializes_simulants(self.inner, creates_columns=[column])
def inner(self, pop_data):
self.population_view.update(choice('start', pop_data.index, values))
return pop_fixture()
def test_transition():
done_state = State('done')
start_state = State('start')
start_state.add_transition(done_state)
machine = Machine('state', states=[start_state, done_state])
simulation = InteractiveContext(components=[machine, _population_fixture('state', 'start')])
event_time = simulation._clock.time + simulation._clock.step_size
machine.transition(simulation.get_population().index, event_time)
assert np.all(simulation.get_population().state == 'done')
def test_choice(base_config):
base_config.update({'population': {'population_size': 10000}})
a_state = State('a')
b_state = State('b')
start_state = State('start')
start_state.add_transition(a_state, probability_func=lambda agents: np.full(len(agents), 0.5))
start_state.add_transition(b_state, probability_func=lambda agents: np.full(len(agents), 0.5))
machine = Machine('state', states=[start_state, a_state, b_state])
simulation = InteractiveContext(components=[machine, _population_fixture('state', 'start')],
configuration=base_config)
event_time = simulation._clock.time + simulation._clock.step_size
machine.transition(simulation.get_population().index, event_time)
a_count = (simulation.get_population().state == 'a').sum()
assert round(a_count/len(simulation.get_population()), 1) == 0.5
def test_null_transition(base_config):
base_config.update({'population': {'population_size': 10000}})
a_state = State('a')
start_state = State('start')
start_state.add_transition(a_state, probability_func=lambda agents: np.full(len(agents), 0.5))
start_state.allow_self_transitions()
machine = Machine('state', states=[start_state, a_state])
simulation = InteractiveContext(components=[machine, _population_fixture('state', 'start')],
configuration=base_config)
event_time = simulation._clock.time + simulation._clock.step_size
machine.transition(simulation.get_population().index, event_time)
a_count = (simulation.get_population().state == 'a').sum()
assert round(a_count/len(simulation.get_population()), 1) == 0.5
def test_no_null_transition(base_config):
base_config.update({'population': {'population_size': 10000}})
a_state = State('a')
b_state = State('b')
start_state = State('start')
a_transition = Transition(start_state, a_state, probability_func=lambda index: | pd.Series(0.5, index=index) | pandas.Series |
from __future__ import division
from laspy.file import File
import numpy as np
import pandas as pd
import time, math
def timing(f):
def wrap(*args):
time1 = time.time()
ret = f(*args)
time2 = time.time()
        print('%s function took %0.3f ms' % (f.__name__, (time2-time1)*1000.0))
return ret
return wrap
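# Minimal usage sketch of the decorator (illustrative, works for any function):
#   @timing
#   def dummy(n):
#       return sum(range(n))
#   dummy(10**6)   # prints something like "dummy function took 35.1 ms"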
@timing
def loadLAS2XYZ(filepath):
'''
Function to load in console the pointcloud of a LAS file
:param filepath: filepath of the LAS file
:return: xyz array containing coordinate of the points
'''
print('Start loading...')
inFile = File(filepath, mode='r')
coords = np.vstack((inFile.x, inFile.y, inFile.z)).transpose()
print('Data loaded')
return coords
@timing
def loadLAS2XYZAIR(filepath):
'''
Function to load in console the pointcloud of a LAS file with points attributes
:param filepath: filepath of the LAS file
:return: xyz array containing coordinate of the points
'''
print('Start loading...')
inFile = File(filepath, mode='r')
coords = np.vstack((inFile.x, inFile.y, inFile.z, inFile.amplitude, inFile.Intensity, inFile.reflectance, inFile.num_returns)).transpose()
print('Data loaded')
return coords
def xyz2binarray(xyz, xstart, xend, ystart, yend, nx=1000, ny=1000, method='min'):
'''
Function to extract projected grid on the XY-plane of point cloud statistics
:param xyz: a 3 column vector containing the point location in cartesian coordinate system
:param xstart: x-minimum of the grid
:param xend: x-maximum of the grid
    :param ystart: y-minimum of the grid
:param yend: y-maximum of the grid
:param nx: number of grid cell in the x directions
:param ny: number of grid cell in the y directions
:param method: statistics to extract from each gridcell
:return: returns a 2D array, xmin, and ymax
TO IMPLEMENT:
- being able to choose to input dx dy instead of nx ny
'''
binned, bins_x, bins_y, bin_xmin, bin_ymin = binData2D(xyz, xstart, xend, ystart, yend, nx, ny)
if method == 'min':
ret = binned.Z.min().unstack().T # .iloc[::-1]
elif method == 'max':
ret = binned.Z.max().unstack().T # .iloc[::-1]
elif method == 'mean':
ret = binned.Z.mean().unstack().T # .iloc[::-1]
elif method == 'median':
ret = binned.Z.median().unstack().T # .iloc[::-1]
elif method == 'count':
ret = binned.Z.count().unstack().T # .iloc[::-1]
xmin = bins_x[ret.columns.min().astype(int)]
ymax = bins_y[ret.index.get_values().max().astype(int)]
newIndy = np.arange(ret.index.get_values().min(), ret.index.get_values().max() + 1)
newIndx = np.arange(ret.columns.min(), ret.columns.max() + 1)
a = ret.reindex(newIndy, newIndx)
mat = np.zeros((ny, nx)) * np.nan
mat[bin_ymin:bin_ymin + a.shape[0], bin_xmin:bin_xmin + a.shape[1]] = a
return mat[::-1], xmin, ymax
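# Example (illustrative: extents and resolution are arbitrary; xyz is any n*3 array,
# e.g. the output of loadLAS2XYZ()):
#   dem, xmin, ymax = xyz2binarray(xyz, xstart=0, xend=100, ystart=0, yend=100,
#                                  nx=500, ny=500, method='min')
# The returned 2D array can then be georeferenced using (xmin, ymax) as its upper-left corner.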
def LAS2txt(filepath,newfile):
'''
Function to convert a pointcloud save in LAS format into a .txt format
:param filepath: filepath of the LAS file
:param newfile: name of the new file
:return: save data into a text file
'''
inFile = File(filepath, mode='r')
coords = np.vstack((inFile.x, inFile.y, inFile.z)).transpose()
    if newfile[-4:] != '.txt':
newfile = newfile + '.txt'
np.savetxt(newfile,coords)
print('File saved: ' + newfile)
def xyz_subsample(xyz, length_out):
'''
Function to subsample a 3 columm matrix.
:param xyz: 3 column matrix
:param length_out: number of sample to output
:return: a 3 column matrix
'''
ind = np.random.randint(0,xyz.shape[0],length_out)
xyz_new = xyz[ind,:]
print('xyz subsampled!')
return xyz_new
def xyz_stat(xyz):
print('Shape of array: ' + str(xyz.shape))
print('Min of xyz: ')
print(np.min(xyz, axis=0))
print('Max of xyz: ')
print(np.max(xyz, axis=0))
print('Mean of xyz: ')
print(np.mean(xyz, axis=0))
print('Extent')
print(np.max(xyz, axis=0)-np.min(xyz, axis=0))
def trans(xyz,trans_vec):
'''
Function to translate an xyz 3 column matrix
:param xyz: a 3 column matrix
:param trans_vec: a translation vector of length 3
:return: a 3 column matrix translated
'''
xyz[:,0] = xyz[:,0] - trans_vec[0]
xyz[:,1] = xyz[:,1] - trans_vec[1]
xyz[:,2] = xyz[:,2] - trans_vec[2]
return xyz
def translate_coords(coords, xyz_trans = None ,ask = True):
'''
Function to translate a point cloud
:param coords: an xyz array
:param xyz_trans: vector of translation in [x,y,z]
:param ask: if True (default) brings an interactive console for approving the translation
:return: translated xyz array
'''
if xyz_trans is None:
xyz_trans = [coords[:,0].min(), coords[:,1].min(), coords[:,2].min()]
if ask is True:
print('Default translation:')
print(str(xyz_trans) + '\n')
        res = int(input('Do you want to translate? 0/1'))
        if res == 0:
            print('No Translation applied')
            return None
        if res == 1:
            return trans(coords, xyz_trans)
if ask is not True:
return trans(coords, xyz_trans)
def truncate(xyz, Xextent, Yextent):
'''
Function to truncate a point cloud with a rectangular shape
:param xyz: a 3 column matrix containing the points coordinate
:param Xextent: a vector of Xmin and Xmax (e.g. [Xmin,Xmax])
:param Yextent: a vector of Ymin and Ymax (e.g. [Ymin, Ymax])
:return: a 3 colum matrix containing the points coordiante within the specified rectangle
'''
xcut = xyz[xyz[:,0]>=Xextent[0]]
xcut1 = xcut[xcut[:,0]<Xextent[1]]
ycut = xcut1[xcut1[:,1]>=Yextent[0]]
ycut1 = ycut[ycut[:,1]<Yextent[1]]
return ycut1
def cart2cyl(xyz, xy_axis=None):
'''
function to convert cartesian coordinates to cylindrical
:param xyz: a 3-column matrix containing the points coordinates expressed in a cartesian system
:param xy_axis: an array of x and y coordinate for the center of the new cylindrical coordinate
    :return: a 3-column matrix with the point coordinates expressed in a cylindrical coordinate system
'''
if xy_axis is not None:
xyz[:,0] = xyz[:,0] - xy_axis[0]
xyz[:,1] = xyz[:,1] - xy_axis[1]
rho = np.sqrt(xyz[:,0]**2 + xyz[:,1]**2)
phi = np.arctan2(xyz[:,1], xyz[:,0])
rpz = np.vstack((rho,phi,xyz[:,2]))
return rpz.transpose()
def cyl2cart(rpz):
'''
convert cylindrical coordinate to cartesian
:param rpz: a 3-column matrix containing the points coordinates expressed in a cylindrical system
:return: a 3-column matrix containing the points coordinates expressed in a cartesian system
'''
x = rpz[:,0] * np.cos(rpz[:,1])
y = rpz[:,0] * np.sin(rpz[:,1])
xyz = np.vstack((x,y,rpz[:,2]))
return xyz.transpose()
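# Quick round-trip sanity check (illustrative, uses only numpy which is already imported):
#   pts = np.array([[1.0, 2.0, 3.0], [-4.0, 0.5, 1.0]])
#   np.allclose(pts, cyl2cart(cart2cyl(pts.copy())))   # expected True (no xy_axis shift)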
def rotate_cloud(xyz, angle, center_coord=None):
'''
Function to rotate a point cloud
:param xyz: n*3 array containing point cloud coordinates in a cartesian system
    :param angle: angle of rotation in radians (callers such as get_slice convert degrees to radians before passing)
    :param center_coord: tuple with xy coordinates of the center of rotation. Default is None
:return: the rotated xyz point cloud
'''
if center_coord is None:
center_coord = [np.mean(xyz[:,0]),np.mean(xyz[:,1])]
rpz = cart2cyl(xyz, xy_axis=center_coord)
rpz[:,1] = rpz[:,1] + angle
xyz = cyl2cart(rpz)
return xyz
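# Example (illustrative): rotate a cloud by 45 degrees about its centroid; the angle is
# passed in radians, matching how get_slice()/get_slice_df() call this function.
#   xyz_rot = rotate_cloud(xyz.copy(), angle=np.pi / 4)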
def get_slice(xyz, thick, dir=0, center_coord=None):
'''
Function to extract a slice of the point cloud xyz
:param xyz: n*3 array containing point cloud coordinates in a cartesian system
:param thick: thickness of the slice
:param dir: direction of the slice in degrees (default is 0)
:param center_coord: tuple with xy coordinates of the center of rotation. Default is None
:return: return slice in xyz format.
'''
if center_coord is None:
center_coord = [np.mean(xyz[:,0]),np.mean(xyz[:,1])]
print(center_coord)
if dir % 180 != 0:
xyz = rotate_cloud(xyz, (dir*math.pi/180), center_coord= center_coord)
myslice = xyz[xyz[:,0]>=-(thick/2)]
myslice = myslice[myslice[:,0]<=(thick/2)]
return myslice
def get_slice_df(df_xyz, thick, dir=0, center_coord=None):
'''
Function to extract a slice of points from a dataframe
    :param df_xyz: DataFrame with 'x', 'y', 'z' columns containing the point cloud coordinates
:param thick: thickness of the slice
:param dir: direction of the slice in degrees (default is 0)
:param center_coord: tuple with xy coordinates of the center of rotation. Default is None
:return: return slice in xyz format.
'''
df = df_xyz.copy()
df_xyz=None
if center_coord is None:
center_coord = [df['x'].mean(),df['y'].mean()]
print(center_coord)
if dir % 180 != 0:
xyz = rotate_cloud(np.array(df[['x','y','z']]), (dir*math.pi/180), center_coord = center_coord)
df[['x','y']] = xyz[:,[0,1]]
myslice = df[df.x >= - (thick / 2)]
myslice = myslice[df.x <= (thick/2)]
else:
myslice = df[df.x >= (center_coord[0] - thick / 2)]
myslice = myslice[df.x <= (center_coord[0] + thick / 2)]
myslice['x'] = myslice['x'] - center_coord[0]
myslice['y'] = myslice['y'] - center_coord[1]
print('Data Sliced')
return myslice
def center_pc_coord_df(df_xyz, center_coord=None):
if center_coord is None:
center_coord = [(df_xyz['x'].max()-df_xyz['x'].min())/2 + df_xyz['x'].min(),
(df_xyz['y'].max()-df_xyz['y'].min())/2 +df_xyz['y'].min()]
print(center_coord)
df_xyz['x'] = df_xyz['x'] - center_coord[0]
df_xyz['y'] = df_xyz['y'] - center_coord[1]
return df_xyz
@timing
def binData2D(myXYZ, xstart, xend, ystart, yend, nx, ny):
'''
    Function to bin a scattered point cloud (xyz) into a 2D array
    :param myXYZ: xyz array containing the point cloud coordinates
:param xstart:
:param xend:
:param ystart:
:param yend:
:param nx: number of cells along the x-axis
    :param ny: number of cells along the y-axis
:return: a group object (pandas library) with all points classified into bins
'''
    # note, the division requires: from __future__ import division
x = myXYZ[:,0].ravel()
y = myXYZ[:,1].ravel()
z = myXYZ[:,2].ravel()
df = pd.DataFrame({'X' : x , 'Y' : y , 'Z' : z})
bins_x = np.linspace(xstart, xend, nx+1)
x_cuts = pd.cut(df.X,bins_x, labels=False)
bins_y = np.linspace(ystart,yend, ny+1)
y_cuts = pd.cut(df.Y,bins_y, labels=False)
bin_xmin, bin_ymin = x_cuts.min(), y_cuts.min()
print('Data cut in a ' + str(bins_x.__len__()) + ' by ' + str(bins_y.__len__()) + ' matrix')
dx = (xend - xstart)/nx
dy = (yend - ystart)/ny
print('dx = ' + str(dx) + ' ; dy = ' + str (dy))
grouped = df.groupby([x_cuts,y_cuts])
print('Data grouped, \nReady to go!!')
return grouped, bins_x, bins_y, int(bin_xmin), int(bin_ymin)
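# Example of consuming the returned group object (illustrative values):
#   grouped, bx, by, x0, y0 = binData2D(xyz, 0, 100, 0, 100, nx=200, ny=200)
#   z_min_per_cell = grouped.Z.min().unstack().T   # same pattern as used in xyz2binarray()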
#=====================================================================
#=====================================================================
# Function in PROGRESS !!! Use at your own risk
#===================================================================
@timing
def binData3D(xyz,xstart, xend, ystart, yend, zstart, zend,nx,ny,nz):
# not ready !!!!
x = xyz[:,0].ravel()
y = xyz[:,1].ravel()
z = xyz[:,2].ravel()
df = pd.DataFrame({'X' : x , 'Y' : y , 'Z' : z})
bins_x = np.linspace(xstart,xend,nx)
x_cuts = pd.cut(df.X,bins_x, labels=False)
bins_y = np.linspace(ystart,yend,ny)
y_cuts = | pd.cut(df.Y,bins_y, labels=False) | pandas.cut |
######################################################################################
# 1.Perform data preprocessing
######################################################################################
import pandas as pd
pd.set_option('display.max_columns', 5)
def create_user_movie_df():
movie = pd.read_csv("datasets/movie_lens_dataset/movie.csv")
rating = pd.read_csv("datasets/movie_lens_dataset/rating.csv")
df = movie.merge(rating, how="left", on="movieId")
comment_counts = pd.DataFrame(df["title"].value_counts())
rare_movies = comment_counts[comment_counts["title"] <= 1000].index
common_movies = df[~df["title"].isin(rare_movies)]
user_movie_df = common_movies.pivot_table(index=["userId"], columns=["title"], values="rating")
user_movie_df.head()
return user_movie_df
user_movie_df = create_user_movie_df()
#random_user = int(pd.Series(user_movie_df.index).sample(1, random_state=45).values)
random_user = 108170
######################################################################################
# 2.Determine the movies watched by the user to be suggested
######################################################################################
random_user = 108170
random_user_df = user_movie_df[user_movie_df.index == random_user]
movies_watched = random_user_df.columns[random_user_df.notna().any()].tolist()
len(movies_watched)
user_movie_df.loc[user_movie_df.index == random_user, user_movie_df.columns == "Stargate (1994)"]
######################################################################################
# 3.Access the data and IDs of other users watching the same movies.
######################################################################################
movies_watched_df= user_movie_df[movies_watched]
user_movie_count = movies_watched_df.T.notnull().sum()
user_movie_count = user_movie_count.reset_index()
user_movie_count.columns = ["userId", "movie_count"]
# user_movie_count[user_movie_count["movie_count"] > 20].sort_values(by="movie_count", ascending = False)
perc = len(movies_watched) * 70 / 100
user_movie_count[user_movie_count["movie_count"] == 33].count()
# users_same_movies = user_movie_count[user_movie_count["movie_count"] > 20]["userId"]
users_same_movies = user_movie_count[user_movie_count["movie_count"] > perc]["userId"]
users_same_movies.head()
users_same_movies.count()
######################################################################################
# 4.Identify the users who are most similar to the user for whom the movie will be recommended
######################################################################################
# 1. Combine random user and other users' data
# 2. Create correlation df
# 3. Find most similar users(top users)
final_df = pd.concat([movies_watched_df[movies_watched_df.index.isin(users_same_movies)], random_user_df[movies_watched]])
final_df.head()
final_df.T.corr().head()
corr_df = final_df.T.corr().unstack().sort_values().drop_duplicates()
corr_df = pd.DataFrame(corr_df, columns=["corr"])
corr_df.index.names = ['user_id_1', 'user_id_2']
corr_df = corr_df.reset_index()
top_users = corr_df[(corr_df["user_id_1"] == random_user) & (corr_df["corr"] >= 0.70)][["user_id_2", "corr"]].reset_index(drop=True)
top_users.sort_values(by='corr', inplace=True, ascending=False)
top_users.rename(columns={"user_id_2": "userId"}, inplace=True)
rating = pd.read_csv('datasets/movie_lens_dataset/rating.csv')
top_users_ratings = top_users.merge(rating[["userId", "movieId", "rating"]], how="inner")
top_users_ratings = top_users_ratings[top_users_ratings["userId"] != random_user]
######################################################################################
# 5.Calculate the Weighted Average Recommendation Score and keep the first 5 movies
######################################################################################
top_users_ratings['weighted_rating'] = top_users_ratings['corr'] * top_users_ratings['rating']
top_users_ratings.head()
recommendation_df = top_users_ratings.groupby('movieId').agg({"weighted_rating": "mean"})
recommendation_df.reset_index(inplace=True)
recommendation_df[["movieId"]].nunique()
movies_to_be_recommend = recommendation_df[recommendation_df["weighted_rating"] > 3.6].sort_values("weighted_rating", ascending=False)
movie = pd.read_csv('datasets/movie_lens_dataset/movie.csv')
movies_to_be_recommend.head(5).merge(movie[["movieId", "title"]])
######################################################################################
# 6.Make an item-based suggestion based on the name of the movie that the user has watched last and gave the highest score
# Make 10 suggestions
# 5 suggestions user-based
# 5 suggestions item-based
######################################################################################
movie = pd.read_csv('datasets/movie_lens_dataset/movie.csv')
rating = | pd.read_csv('datasets/movie_lens_dataset/rating.csv') | pandas.read_csv |
import requests
import pandas as pd
import math
pd.options.display.max_rows = 999
class Schedule:
def __init__(self, season="",eventCode="",tournamentLevel="",Exel_File=None):
if not Exel_File is None:
self.__df = pd.read_csv(Exel_File)
else:
self.__df = None
self._season = season
self._eventCode = eventCode
headers = {"Authorization":"Basic Z<KEY>",'Accept': 'application/json'}
__url__ = "https://frc-api.firstinspires.org/v2.0/"
r_url = f"{__url__}{season}/schedule/{eventCode}?tournamentLevel={tournamentLevel}"
r = requests.get(r_url,headers=headers)
self.data_r = r.json()['Schedule']
def get_red_and_blue_teams_at_match(self,matchNumber):
red_teams = []
blue_teams = []
if matchNumber < len(self.data_r):
for team in self.data_r[matchNumber]['teams']:
if "Red" in team['station']:
red_teams.append(team)
elif "Blue" in team['station']:
blue_teams.append(team)
return red_teams,blue_teams
def __fix_match(self,matchNumber,red_teams,blue_teams):
json_data = {"Match Number": matchNumber}
for team in red_teams + blue_teams:
json_data[team['station']] = team['teamNumber']
return json_data
def fix_match(self,matchNumber,teams):
red_teams,blue_teams = teams
json_data = {"Match Number": matchNumber}
for team in red_teams + blue_teams:
json_data[team['station']] = team['teamNumber']
return json_data
def get_all_matches_in_datafarme(self):
_data = []
for i in range(len(self.data_r)):
red,blue = self.get_red_and_blue_teams_at_match(i)
_data.append(self.__fix_match(i+1,red,blue))
df = | pd.DataFrame(_data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
from glob import glob
def calc_rocof(data, smooth_window_size, lookup_window_size, method='increment_smoothing'):
if data.index[0].minute!=0 or data.index[0].second!=0:
print('Data is not starting with full hour!')
return None
full_hours = data.index[::3600]
full_hours = full_hours[1:-1]
result = pd.Series(index = full_hours)
if method=='frequency_smoothing':
for i in np.arange(len(full_hours)):
smoothed_snipped = data.iloc[i*3600:(i+2)*3600].rolling(smooth_window_size, center=True).mean()
df_dt = smoothed_snipped.diff(periods=5).iloc[3600-lookup_window_size:3600+lookup_window_size]
if df_dt.isnull().any():
result.iloc[i]=np.nan
else:
result.iloc[i] = df_dt[df_dt.abs().idxmax()] / 5.
if method=='increment_smoothing':
for i in np.arange(len(full_hours)):
df_dt = data.iloc[i*3600:(i+2)*3600].diff().rolling(smooth_window_size , center=True).mean()
df_dt = df_dt.iloc[3600-lookup_window_size:3600+lookup_window_size]
if df_dt.isnull().any():
result.iloc[i]=np.nan
else:
result.iloc[i] = df_dt[df_dt.abs().idxmax()]
return result
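# Example (illustrative: `freq` is assumed to be a pandas Series of 1 Hz grid-frequency
# samples with a DatetimeIndex starting exactly on a full hour):
#   rocof = calc_rocof(freq, smooth_window_size=60, lookup_window_size=30,
#                      method='increment_smoothing')
# The result holds, for each full hour, the smoothed frequency increment of largest
# magnitude found within +/- lookup_window_size samples of the hour change.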
def make_frequency_data_hdf(path_to_frequency_csv, tso_name, frequency_hdf_folder, start_time, end_time, time_zone, delete_existing_hdf=False):
print('\nConverting frequency data to hdf ', tso_name, '...')
year_index = pd.date_range(start=start_time, end=end_time, freq='Y')
if not os.path.exists(frequency_hdf_folder):
os.makedirs(frequency_hdf_folder)
hdf_file = glob(frequency_hdf_folder + 'cleansed*.h5')
if not hdf_file or delete_existing_hdf:
data = | pd.Series(dtype=np.float) | pandas.Series |
import os
import numpy as np
import pandas as pd
from pyomo import environ as pe
from hatchet.utils.solve.ilp_subset import ILPSubset
from hatchet.utils.solve.cd import CoordinateDescent
from hatchet.utils.solve.utils import parse_clonal, scale_rdr
from hatchet import config
def solver_available(solver=None):
solver = solver or config.compute_cn.solver
if solver == 'cpp':
return os.getenv('GRB_LICENSE_FILE') is not None
elif solver == 'gurobipy':
return pe.SolverFactory('gurobi', solver_io='python').available(exception_flag=False)
return pe.SolverFactory(solver).available(exception_flag=False)
def solve(clonal, seg_file, n, solver='gurobi', solve_mode='cd', d=-1, cn_max=-1, mu=0.01, diploid_threshold=0.1,
ampdel=True, n_seed=400, n_worker=8, random_seed=None, max_iters=None, timelimit=None):
assert solve_mode in ('ilp', 'cd', 'both'), 'Unrecognized solve_mode'
assert solver_available(solver), f'Solver {solver} not available or not licensed'
if max_iters is None:
max_iters = 10
df = pd.read_csv(seg_file, sep='\t')
df = df.sort_values(['#ID', 'SAMPLE'])
# sanity-check
sample_ids = np.sort(df['SAMPLE'].unique())
for _cluster_id, _df in df.groupby('#ID'):
_sample_ids = _df['SAMPLE'].values
if not np.all(_sample_ids == sample_ids):
raise ValueError(f'Sample IDs for cluster {_cluster_id} do not match {sample_ids}')
rdr = df.pivot(index='#ID', columns='SAMPLE', values='RD')
baf = df.pivot(index='#ID', columns='SAMPLE', values='BAF')
bins = {} # cluster_id => no. of bins
for cluster_id, _df in df.groupby('#ID'):
if len(_df['#BINS'].unique()) != 1:
raise ValueError(f'Bin sizes for cluster {cluster_id} across tumor samples are not identical!')
bins[cluster_id] = _df.iloc[0]['#BINS']
bins = | pd.Series(bins) | pandas.Series |
import os,sys
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from analysis import subset_probas
import plotly.graph_objects as go
fewlabels = ['Applause',
'Bird vocalization, bird call, bird song',
'Caw',
'Bee, wasp, etc.',
'Wind noise (microphone)','Rain','Vehicle','Silence']
def average_proba_over_freq(Df,freq_str='D',subset_labels=fewlabels):
"""
Calculates probability density estimates over a configurable frequency
arguments :
Df : DataFrame, output of tag_silentcities
freq_str : Frequency over which to calculate probability density estimate (default : days)
subset_labels : Subset of labels from the Audioset Ontology to be used for the estimate.
default labels are :
'Applause','Bird vocalization, bird call, bird song','Chirp, tweet','Pigeon, dove',
'Caw','Bee, wasp, etc.','Wind noise (microphone)','Rain','Vehicle','Emergency vehicle','Rail transport',
'Aircraft','Silence'
outputs :
    probas_agg : Probability density estimates of the subset of labels, calculated according to the specified frequency
"""
#Let's use the datetime (Timestamp) as a new index for the dataframe
ts = pd.DatetimeIndex(Df.datetime)
Df.index = ts
# Let's add the Labels from the shortlist as entries in the Df. Will be easier to manipulate them afterwards
prob = subset_probas(Df,subset_labels)
for f,curlabel in enumerate(subset_labels):
Df[curlabel] = prob[:,f]
# Now let's create a period range to easily compute statistics over days (frequency can be changed by changing the freq_str argument)
prng = | pd.period_range(start=ts[0],end=ts[-1], freq=freq_str) | pandas.period_range |
"""
Prepare training and testing datasets as CSV dictionaries 2.0
Created on 04/26/2019; modified on 11/06/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
import re
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
# pair tiles of 20x, 10x, 5x of the same area
def paired_tile_ids_in_old(slide, label, root_dir):
dira = os.path.isdir(root_dir + 'level0')
dirb = os.path.isdir(root_dir + 'level1')
dirc = os.path.isdir(root_dir + 'level2')
if dira and dirb and dirc:
if "TCGA" in root_dir:
fac = 1000
else:
fac = 500
ids = []
for level in range(3):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0]
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
else:
print('Skipping ID:', id)
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 0]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 1]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 2]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
return idsa
def tile_ids_in(inp):
ids = []
try:
for id in os.listdir(inp['path']):
if '_{}.png'.format(str(inp['sldnum'])) in id:
ids.append([inp['slide'], inp['level'], inp['path']+'/'+id, inp['BMI'], inp['age'], inp['label']])
except FileNotFoundError:
print('Ignore:', inp['path'])
return ids
# pair tiles of 10x, 5x, 2.5x of the same area
def paired_tile_ids_in(slide, label, root_dir, age=None, BMI=None):
dira = os.path.isdir(root_dir + 'level1')
dirb = os.path.isdir(root_dir + 'level2')
dirc = os.path.isdir(root_dir + 'level3')
if dira and dirb and dirc:
if "TCGA" in root_dir:
fac = 2000
else:
fac = 1000
ids = []
for level in range(1, 4):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0]
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
else:
print('Skipping ID:', id)
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 1]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 2]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 3]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
idsa['age'] = age
idsa['BMI'] = BMI
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
return idsa
# Balance CPTAC and TCGA tiles in each class
def balance(pdls, cls):
balanced = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for i in range(cls):
ref = pdls.loc[pdls['label'] == i]
CPTAC = ref[~ref['slide'].str.contains("TCGA")]
TCGA = ref[ref['slide'].str.contains("TCGA")]
if CPTAC.shape[0] != 0 and TCGA.shape[0] != 0:
ratio = (CPTAC.shape[0])/(TCGA.shape[0])
if ratio < 0.2:
TCGA = TCGA.sample(int(5*CPTAC.shape[0]), replace=False)
ref = pd.concat([TCGA, CPTAC], sort=False)
elif ratio > 5:
CPTAC = CPTAC.sample(int(5*TCGA.shape[0]), replace=False)
ref = pd.concat([TCGA, CPTAC], sort=False)
balanced = pd.concat([balanced, ref], sort=False)
return balanced
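# Usage sketch (illustrative): `tiles` is a DataFrame produced by paired_tile_ids_in()
# with an integer 'label' column, and cls is the number of classes.
#   balanced_tiles = balance(tiles, cls=2)
# Within each class, the larger of the CPTAC / TCGA subsets is down-sampled to at most
# 5x the size of the smaller one.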
# Prepare label at per patient level
def big_image_sum(pmd, path='../tiles/', ref_file='../Fusion_dummy_His_MUT_joined.csv'):
ref = pd.read_csv(ref_file, header=0)
big_images = []
if pmd == 'subtype':
ref = ref.loc[ref['subtype_0NA'] == 0]
for idx, row in ref.iterrows():
if row['subtype_POLE'] == 1:
big_images.append([row['name'], 0, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_MSI'] == 1:
big_images.append([row['name'], 1, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_Endometrioid'] == 1:
big_images.append([row['name'], 2, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_Serous-like'] == 1:
big_images.append([row['name'], 3, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif pmd == 'histology':
ref = ref.loc[ref['histology_Mixed'] == 0]
for idx, row in ref.iterrows():
if row['histology_Endometrioid'] == 1:
big_images.append([row['name'], 0, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
if row['histology_Serous'] == 1:
big_images.append([row['name'], 1, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif pmd in ['Endometrioid', 'MSI', 'Serous-like', 'POLE']:
# ref = ref.loc[ref['histology_Endometrioid'] == 1]
ref = ref.loc[ref['subtype_0NA'] == 0]
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row['subtype_{}'.format(pmd)]), path + "{}/".format(str(row['name'])),
row['age'], row['BMI']])
elif pmd == 'MSIst':
ref = ref.loc[ref['MSIst_0NA'] == 0]
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row['MSIst_MSI-H']), path + "{}/".format(str(row['name'])),
row['age'], row['BMI']])
else:
ref = ref.dropna(subset=[pmd])
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row[pmd]), path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
datapd = pd.DataFrame(big_images, columns=['slide', 'label', 'path', 'age', 'BMI'])
return datapd
# TO KEEP SPLIT THE SAME AS BASELINES. Separate into training and testing; each type uses the same separation
# ratio on big images. Test and train csv files contain the tiles' paths.
def set_sep_secondary(alll, path, cls, pmd, batchsize=24):
if pmd == 'subtype':
split = pd.read_csv('../split/ST.csv', header=0)
elif pmd == 'histology':
split = pd.read_csv('../split/his.csv', header=0)
elif pmd == 'Serous-like':
split = pd.read_csv('../split/CNVH.csv', header=0)
elif pmd == 'Endometrioid':
split = pd.read_csv('../split/CNVL.csv', header=0)
else:
split = pd.read_csv('../split/{}.csv'.format(pmd), header=0)
train = split.loc[split['set'] == 'train']['slide'].tolist()
validation = split.loc[split['set'] == 'validation']['slide'].tolist()
test = split.loc[split['set'] == 'test']['slide'].tolist()
trlist = []
telist = []
valist = []
subset = alll
valist.append(subset[subset['slide'].isin(validation)])
telist.append(subset[subset['slide'].isin(test)])
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
train_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
validation_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for idx, row in test.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
test_tiles = pd.concat([test_tiles, tile_ids])
for idx, row in train.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
train_tiles = pd.concat([train_tiles, tile_ids])
for idx, row in validation.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
validation_tiles = pd.concat([validation_tiles, tile_ids])
train_tiles = balance(train_tiles, cls=cls)
validation_tiles = balance(validation_tiles, cls=cls)
# No shuffle on test set
train_tiles = sku.shuffle(train_tiles)
validation_tiles = sku.shuffle(validation_tiles)
if train_tiles.shape[0] > int(batchsize * 80000 / 3):
train_tiles = train_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate training set!')
if validation_tiles.shape[0] > int(batchsize * 80000 / 30):
validation_tiles = validation_tiles.sample(int(batchsize * 80000 / 30), replace=False)
print('Truncate validation set!')
if test_tiles.shape[0] > int(batchsize * 80000 / 3):
test_tiles = test_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate test set!')
test_tiles.to_csv(path + '/te_sample.csv', header=True, index=False)
train_tiles.to_csv(path + '/tr_sample.csv', header=True, index=False)
validation_tiles.to_csv(path + '/va_sample.csv', header=True, index=False)
return train_tiles, test_tiles, validation_tiles
# Training and validation on TCGA; Testing on CPTAC
def set_sep_idp(alll, path, cls, cut=0.1, batchsize=64):
trlist = []
telist = []
valist = []
TCGA = alll[alll['slide'].str.contains("TCGA")]
CPTAC = alll[~alll['slide'].str.contains("TCGA")]
for i in range(cls):
subset = TCGA.loc[TCGA['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut)]
valist.append(subset[subset['slide'].isin(validation)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
telist.append(CPTAC)
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles = | pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI']) | pandas.DataFrame |
import sys
import datetime
import warnings
warnings.simplefilter(action='ignore', category=RuntimeWarning)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pylab import rcParams
plt.rcParams['figure.figsize'] = 15,7
plt.rcParams['font.size'] = 8
#plt.style.use('ggplot')
plt.style.use('default')
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn import tree
import pymongo
from pymongo import MongoClient
from nowtrade.data_connection import MongoDatabaseConnection
#from tesjualbeli import get_orders, order
def analisis():
nama_db = 'symbol-data'
nama_db_price = 'hasil-analisis'
nama_saham = [x.strip() for x in open('saham_list.txt', 'r').read().split('#')]
#nama_saham = 'NVDA'
client = MongoClient('localhost', 27017)
db = client[nama_db]
db_price = client[nama_db_price]
mdb_hasil_analisis = []
for dfq in nama_saham:
df = pd.DataFrame()
df = pd.DataFrame(list(db[dfq].find()))
mdb_df = df
mdb_df['INSERT_TIME'] = datetime.datetime.now()
df = df.drop(columns=['INSERT_TIME'])
df.rename(columns={'open': 'Open',
'high': 'High',
'low': 'Low',
'close': 'Close',
'volume': 'Volume',
'adj_close': 'Adj Close',
'_id': 'Date'},
inplace=True)
df['Symbol'] = dfq
df['Symbol'] = df['Symbol'].astype('string')
df['change_in_price'] = df['Close'].diff()
hargas = df['Close']
df_date = df['Date']
df = df.set_index('Date')
#df.sort_values(by = ['Symbol','Date'], inplace = True)
price_data = df[df['Volume'] != 0]
price_data.tail()
#trend = 0
print('\nSIMBOL : ',dfq)
#price_data = pd.read_csv(df)
### RSI ###
n = 8
up_df, down_df = price_data[['change_in_price']].copy(), price_data[['change_in_price']].copy()
up_df.loc['change_in_price'] = up_df.loc[(up_df['change_in_price'] < 0), 'change_in_price'] = 0
down_df.loc['change_in_price'] = down_df.loc[(down_df['change_in_price'] > 0), 'change_in_price'] = 0
down_df['change_in_price'] = down_df['change_in_price'].abs()
ewma_up = up_df['change_in_price'].transform(lambda x: x.ewm(span = n).mean())
ewma_down = down_df['change_in_price'].transform(lambda x: x.ewm(span = n).mean())
relative_strength = ewma_up / ewma_down
relative_strength_index = 100.0 - (100.0 / (1.0 + relative_strength))
price_data['down_days'] = down_df['change_in_price']
price_data['up_days'] = up_df['change_in_price']
price_data['RSI'] = relative_strength_index
        threshold_up = 80  # REVISION ###################
threshold_down = 20
##########
### STOCHASTIC ###
n = 8
low_4, high_4 = price_data[['Low']].copy(), price_data[['High']].copy()
low_4 = low_4['Low'].transform(lambda x: x.rolling(window = n).min())
high_4 = high_4['High'].transform(lambda x: x.rolling(window = n).max())
k_percent = 100 * ((price_data['Close'] - low_4) / (high_4 - low_4))
price_data['Low_Sto'] = low_4
price_data['High_Sto'] = high_4
price_data['K_percent'] = k_percent
##########
### WILLIAMS %R ###
n = 8
low_8, high_8 = price_data[['Low']].copy(), price_data[['High']].copy()
low_8 = low_8['Low'].transform(lambda x: x.rolling(window = n).min())
high_8 = high_8['High'].transform(lambda x: x.rolling(window = n).max())
r_percent = ((high_8 - price_data['Close']) / (high_8 - low_8)) * - 100
price_data['R_percent'] = r_percent
threshold_up_r = -20
threshold_down_r = -80
###########
### MACD ###
n = 8
ema_26 = price_data['Close'].transform(lambda x: x.ewm(span = 26).mean())
ema_12 = price_data['Close'].transform(lambda x: x.ewm(span = 12).mean())
macd = ema_12 - ema_26
ema_8_macd = macd.ewm(span = n).mean()
price_data['MACD'] = macd
price_data['MACD_EMA'] = ema_8_macd
price_data['MACD_DIFF'] = price_data['MACD'] - price_data['MACD'].shift()
threshold_up_macd = 0.07
threshold_down_macd = -0.07
##########
closing = price_data['Close']
#data = closing.drop(closing.tail(1).index)
#second_newest = closing.iloc[:2]
#newest = closing.iloc[:1]
#days_out = 30
#price_data["label"] = [1 if x > newest else -1 if x < newest else 0]
closing = closing.transform(lambda x : np.sign(x.diff()))
#closing = closing.transform(lambda x : x.shift(1) < x)
price_data['Prediction'] = closing
price_data['RSI_Trend'] = [ 1 if x >= threshold_up else -1 if x < threshold_down else 0 for x in price_data['RSI']]
price_data['STO_Trend'] = [ 1 if x >= threshold_up else -1 if x < threshold_down else 0 for x in price_data['K_percent']]
price_data['W%R_Trend'] = [ 1 if x >= threshold_up_r else -1 if x < threshold_down_r else 0 for x in price_data['R_percent']]
price_data['MACD_Trend'] = [ 1 if x >= threshold_up_macd else -1 if x < threshold_down_macd else 0 for x in price_data['MACD_DIFF']]
price_data['ALL_TRENDS'] = price_data['RSI_Trend'] + price_data['STO_Trend'] + price_data['W%R_Trend'] + price_data['MACD_Trend']
#price_data.dtypes
price_data['DECISION'] = [ 1 if x > 1 else -1 if x < -1 else 0 for x in price_data['ALL_TRENDS']]
raee = price_data['DECISION']
raeee = raee.iloc[-1]
price_data = price_data.dropna()
####################################### RANDOM FOREST ##################################
X_Cols = price_data[['RSI', 'MACD', 'K_percent', 'R_percent', 'MACD_DIFF', 'MACD_EMA', 'Open', 'High', 'Low', 'Close', 'Volume']]
L = X_Cols
print(L.shape)
#P = price_data
Y_Cols = price_data['Prediction']
#price_data['RF'] = ''
#train_size = int(X_Cols.shape[0]*0.7)
#X_train = X_Cols[:train_size]
#X_test = X_Cols[train_size:]
#y_train = Y_Cols[:train_size]
#y_test = Y_Cols[train_size:]
X_train, X_test, y_train, y_test = train_test_split(X_Cols, Y_Cols, train_size = 0.6)
model = RandomForestClassifier(n_estimators = 100, criterion = 'gini', max_depth = 5, bootstrap = True)
model.fit(X_train, y_train)
x_pred_test = model.predict(X_test)
x_pred_train = model.predict(X_train)
y_pred_test = y_test
y_pred_train = y_train
df1 = pd.DataFrame(x_pred_train[:, None], columns = ['RF_Train']) #index=df_date.index)
df2 = pd.DataFrame(x_pred_test[:, None], columns = ['RF_Train'])
dff = | pd.concat([df1, df2]) | pandas.concat |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@file:analyse_data.py
@author: Honour-Van: <EMAIL>
@date:2021/04/27 15:48:18
@description:analyse the data we have gotten, with 3 tasks
@version:1.0
'''
import pandas as pd
import json
def is_prov(line: str) -> bool:
"""
# is_prov
@Description:
a level judge function.
    to see if a line of record refers to a province
---------
@Param:
a line of record in StatData.txt
-------
@Returns:
a bool value if it's a province
-------
"""
return True if line[0] != '\t' else False
def is_base(line: str) -> bool:
"""
# is_base
@Description:
a level judge function.
    to see if a line of record refers to a leaf (lowest-level) node
---------
@Param:
a line of record in StatData.txt
-------
@Returns:
a bool value if it's a grassroot unit
-------
"""
return True if (line[-4:-1]).isdigit() else False
def place_name(line: str) -> str:
"""
# place_name
@Description:
extract the name of place in a line of record in StatData.txt
---------
@Param:
a line of record in StatData.txt
-------
@Returns:
a Chinese word str: name of place
-------
"""
valid_char_beg = line.rfind('\t') + 1
return line[valid_char_beg+12:-4] if is_base(line) else line[valid_char_beg+12:-1]
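# Illustrative behaviour on a synthetic record line (the code and name below are made up,
# but follow the StatData.txt layout: tabs for depth, a 12-digit code, the place name and,
# for leaf rows, a trailing 3-digit urban/rural code):
#   line = "\t\t\t\t110101001001某某社区居委会111\n"
#   is_prov(line)     # -> False (indented, so not a province row)
#   is_base(line)     # -> True  (ends with the 3-digit code)
#   place_name(line)  # -> "某某社区居委会"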
if __name__ == "__main__":
# temporary tool-use data structures
dt1 = pd.DataFrame(columns=["111", "112", "121",
"122", "123", "210",
"220"])
task1 = {}
task2 = {"河南省": {}, "内蒙古自治区": {}}
f = open("assets/family_name.json") # the 100 family names are transferred by familyname2json.py
task3 = json.load(f)
f.close()
# task2 prov_name filter tools
prov_name = ""
prov_cnt = False
# task3 level filter tools: to see if it's the last but one level
last_name = ""
sub_base_flag = True
with open("StatData.txt", encoding='utf-8') as f:
for line in f.readlines():
if is_prov(line):
# task1 started to count kinds of different id codes
if task1: # to see if the dict is empty
dt1 = dt1.append(pd.Series(task1, name=prov_name))
task1 = {"111": 0, "112": 0, "121": 0,
"122": 0, "123": 0, "210": 0, "220": 0}
# task2 started to select the henan and inner mongolia
prov_name = place_name(line)
if prov_name == "河南省" or prov_name == "内蒙古自治区":
prov_cnt = True
else:
prov_cnt = False
elif is_base(line):
# task1: in order to get the id codes of grassroot units
task1[line[-4:-1]] = task1.get(line[-4:-1], 0) + 1
# task2: if the current units belong to henan or inner mongolia
if prov_cnt:
base_name = place_name(line)
if base_name[-3:] == "村委会":
for item in base_name[:-3]:
task2[prov_name][item] = task2[prov_name].get(
item, 0) + 1
# task3: count the last but one levels
if sub_base_flag:
sub_base_flag = False
if last_name[0] in task3:
task3[last_name[0]] = task3[last_name[0]] + 1
# task3: count the grassroot units
name3 = place_name(line)
if name3[0] in task3:
task3[name3[0]] = task3[name3[0]] + 1
else:
sub_base_flag = True
last_name = place_name(line)
# prepare to output
# task1
if task1: # to see if the dict is empty
dt1 = dt1.append(pd.Series(task1, name=prov_name))
dt1.rename(columns={"111": "主城区(111)", "112": "城乡结合区(112)", "121": "镇中心区(121)",
"122": "镇乡结合区(122)", "123": "特殊区域(123)", "210": "乡中心区(210)", "220": "村庄(220)"}, inplace=True)
| pd.set_option('display.unicode.ambiguous_as_wide', True) | pandas.set_option |
# Copyright(c) Microsoft Corporation.
# Licensed under the MIT license.
import pyodbc
import sys
from pandas import DataFrame
from .connectioninfo import ConnectionInfo
from .sqlbuilder import SQLBuilder
from .sqlbuilder import STDOUT_COLUMN_NAME, STDERR_COLUMN_NAME
"""This module is used to actually execute sql queries. It uses the pyodbc module under the hood.
It is mostly setup to work with SQLBuilder objects as defined in sqlbuilder.
"""
# This function is best used to execute_function_in_sql a one off query
# (the SQL connection is closed after the query completes).
# If you need to keep the SQL connection open in between queries, you can use the _SQLQueryExecutor class below.
def execute_query(builder, connection: ConnectionInfo, out_file:str=None):
with SQLQueryExecutor(connection=connection) as executor:
return executor.execute(builder, out_file=out_file)
def execute_raw_query(conn: ConnectionInfo, query, params=()):
with SQLQueryExecutor(connection=conn) as executor:
return executor.execute_query(query, params)
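# Usage sketch (hypothetical connection details; the ConnectionInfo fields shown here are
# assumptions and depend on your SQL Server setup):
#   conn = ConnectionInfo(server="localhost", database="AirlineTestDB",
#                         uid="user", pwd="password")
#   df = execute_raw_query(conn, "SELECT TOP 5 * FROM airline5000 WHERE DayOfWeek = ?", (3,))
# Both helpers open a connection, run the query, and close the connection before returning
# the resulting DataFrame.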
class SQLQueryExecutor:
"""_SQLQueryExecutor objects keep a SQL connection open in order to execute_function_in_sql one or more queries.
This class implements the basic context manager paradigm.
"""
def __init__(self, connection: ConnectionInfo):
self._connection = connection
def execute(self, builder: SQLBuilder, out_file=None):
return self.execute_query(builder.base_script, builder.params, out_file=out_file)
def execute_query(self, query, params, out_file=None):
df = | DataFrame() | pandas.DataFrame |
# Predicting Cognitive Impairment by <NAME>.
# www.github.com/kkphd/mmse
import Step_1_MMSE_Data_Cleaning
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
import pprint
analysis_results = Step_1_MMSE_Data_Cleaning.run_analysis()
# Perform exploratory data analysis by first filtering for visit 1 cases ("v1") and sub-setting the groups.
# Group membership is based on their cognitive test score.
v1 = analysis_results['Visit'] == 1
v1_all = analysis_results[v1]
has_unique_cases = v1_all.duplicated('ID').sum() == 0
is_null = v1_all.isnull().sum()
v1_all_nrow = len(v1_all)
v1_all_ncol = len(v1_all.columns)
intact = v1_all['MMSE_Group'] == 'Intact'
v1_intact = v1_all[intact]
impaired = v1_all['MMSE_Group'] == 'Impaired'
v1_impaired = v1_all[impaired]
# Determine value counts of Intact and Impaired individuals.
v1_all['MMSE_Group'].value_counts()
# Intact: N = 124
# Impaired: N = 26
# Determine the types of variables we are working with.
v1_all.info()
# Exploratory data analysis:
group_agree = v1_all['Group_Agreement'].mean()*100
def v1(v1_all):
v1_desc = v1_all.groupby('MMSE_Group').agg(
{
'MF_01': ['count', 'mean', 'std', 'min', 'max', 'median'],
'Age': ['count', 'mean', 'std', 'min', 'max', 'median', 'skew'],
'Edu': ['count', 'mean', 'std', 'min', 'max', 'median', 'skew'],
'MMSE': ['count', 'mean', 'std', 'min', 'max', 'median', 'skew'],
'MMSE_T': ['count', 'mean', 'std', 'min', 'max', 'median', 'skew'],
'MMSE_Percentile': ['count', 'mean', 'std', 'min', 'max', 'median', 'skew'],
'CDR': ['count', 'mean', 'std', 'min', 'max', 'median'],
'eTIV': ['count', 'mean', 'std', 'min', 'max', 'median', 'skew'],
'nWBV': ['count', 'mean', 'std', 'min', 'max', 'median', 'skew'],
'ASF': ['count', 'mean', 'std', 'min', 'max', 'median', 'skew']
}
)
return np.transpose(v1_desc)
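# Illustrative call (v1_all is the visit-1 DataFrame built above):
#   v1_summary = v1(v1_all)
#   v1_summary.loc[('MMSE', 'mean')]   # mean MMSE per MMSE_Group, thanks to the transpose
# After np.transpose, the (variable, statistic) pairs are on the rows and the groups
# ('Intact', 'Impaired') are on the columns.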
def create_group_plots():
fig1, ax1 = plt.subplots(2, 2, figsize=(16, 10))
ax1[0, 0].set_title('Distribution of Age by Group', size='14')
ax1[0, 1].set_title('Distribution of Education by Group', size='14')
ax1[1, 0].set_title('Distribution of Cognitive Status', size='14')
ax1[1, 1].set_title('Distribution of CDR Stage', size='14')
fig1.subplots_adjust(hspace=0.3)
sns.set(style='whitegrid', palette='muted')
age_order = ['60-64', '65-69', '70-74', '75-79', '80-84', '>=85']
edu_order = ['5-8', '9-12', '>12']
age_fig = sns.countplot(data=v1_all, x='Age_Group', hue='MMSE_Group', ax=ax1[0, 0], order=age_order)
edu_fig = sns.countplot(data=v1_all, x='Edu_Group', hue='MMSE_Group', ax=ax1[0, 1], order=edu_order)
dem_fig = sns.countplot(data=v1_all, x='MMSE_Label', ax=ax1[1, 0])
cdr_fig = sns.countplot(data=v1_all, x='CDR_Stage', ax=ax1[1, 1])
age_fig.set_xlabel('Age Group')
edu_fig.set_xlabel('Years of Education')
dem_fig.set_xlabel('Level of Cognitive Impairment')
cdr_fig.set_xlabel('CDR Stage')
create_group_plots()
# The data suggests we have an imbalanced sample favoring normals.
def freq_figures():
fig2, ax2 = plt.subplots(1, 2, figsize=(12, 6))
fig2.suptitle('Number of Participants per Group', size='16')
sns.set(style='whitegrid', palette='muted')
sns.countplot(data=v1_all, x='Group', ax=ax2[0])
sns.countplot(data=v1_all, x='MMSE_Group', ax=ax2[1])
freq_figures()
if has_unique_cases:
v1_eda = v1(v1_all)
v1_eda = v1_eda.rename(columns={'MMSE_Group': 'Group'},
index={'MF_01': 'Sex', 'Edu': 'Education (years)', 'MMSE_T': 'MMSE T Score'})
print(v1_eda)
pprint.pprint(v1_eda)
# Variables were considered skewed if it was ~ < -1 or ~ > 1.
# Intact: MMSE, MMSE_Percentile, and eTIV were moderately skewed.
# Impaired: Education was moderately skewed.
# Test for normality of continuous variables by performing the Shapiro-Wilk test.
def calc_shapiro():
shapiro_all = {}
shapiro_intact = {}
shapiro_impaired = {}
shapiro_columns = ['Age', 'Edu', 'MMSE', 'MMSE_T', 'MMSE_Percentile', 'eTIV', 'nWBV', 'ASF']
for column in shapiro_columns:
        shapiro_all[column] = stats.shapiro(v1_all[column])
shapiro_intact[column] = stats.shapiro(v1_intact[column])
shapiro_impaired[column] = stats.shapiro(v1_impaired[column])
return shapiro_all, shapiro_intact, shapiro_impaired
shapiro_all, shapiro_intact, shapiro_impaired = calc_shapiro()
# Intact group: Edu, MMSE, MMSE_T, MMSE_Percentile, and eTIV were not derived from a normal distribution.
# Impaired group: Edu, MMSE_T, and MMSE_Percentile were not derived from a normal distribution.
shapiro_all_df = pd.DataFrame(shapiro_all, index=['W', 'p-value'])
shapiro_intact_df = pd.DataFrame.from_dict(shapiro_intact)
shapiro_impaired_df = pd.DataFrame.from_dict(shapiro_impaired)
# Test for kurtosis.
def calc_kurt():
kurt_all = {}
kurt_intact = {}
kurt_impaired = {}
kurt_columns = ['MF_01', 'Age', 'Edu', 'MMSE', 'MMSE_T', 'MMSE_Percentile', 'CDR', 'eTIV', 'nWBV', 'ASF']
for column in kurt_columns:
kurt_all[column] = stats.kurtosis(v1_all[column])
kurt_intact[column] = stats.kurtosis(v1_intact[column])
kurt_impaired[column] = stats.kurtosis(v1_impaired[column])
return kurt_all, kurt_intact, kurt_impaired
kurt_all, kurt_intact, kurt_impaired = calc_kurt()
# The distributions are largely platykurtic.
kurt_all_df = pd.DataFrame(kurt_all, index=[0])
import os
from collections import Counter
from os import listdir
from os.path import isfile, join
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.pyplot import figure
from matplotlib import style
style.use('ggplot')
import scipy
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm, ticker
import numpy as np
from sys import argv
import Orange
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mcolors, cm
from matplotlib.collections import PolyCollection
from classifiers import classifiers_list
from datasetsDelaunay import dataset_list_bi, dataset_list_mult
from folders import output_dir, dir_pca_biclasse, metricas_biclasse, dir_pca_multiclasse, metricas_multiclasse
from parameters import order, alphas
order_dict = {'area': 1,
'volume': 2,
'area_volume_ratio': 3,
'edge_ratio': 4,
'radius_ratio': 5,
'aspect_ratio': 6,
'max_solid_angle': 7,
'min_solid_angle': 8,
'solid_angle': 9}
class Statistics:
def __init__(self):
pass
def compute_CD_customizado(self, avranks, n, alpha="0.05", test="nemenyi"):
"""
Returns critical difference for Nemenyi or Bonferroni-Dunn test
according to given alpha (either alpha="0.05" or alpha="0.1") for average
        ranks and number of tested datasets N. Test can be either "nemenyi" for
        the Nemenyi two-tailed test or "bonferroni-dunn" for the Bonferroni-Dunn test.
"""
k = len(avranks)
        # critical values padded with two leading zeros so that q[k] can be indexed
        # directly by the number of compared methods k, as in the other tables below
        d = {("nemenyi", "0.05"): [0, 0, 1.960, 2.344, 2.569, 2.728, 2.850, 2.948, 3.031, 3.102, 3.164, 3.219, 3.268,
                                   3.313, 3.354, 3.391, 3.426, 3.458, 3.489, 3.517, 3.544, 3.569, 3.593, 3.616, 3.637,
                                   3.658, 3.678, 3.696, 3.714, 3.732, 3.749, 3.765, 3.780, 3.795, 3.810, 3.824, 3.837,
                                   3.850, 3.863, 3.876, 3.888, 3.899, 3.911, 3.922, 3.933, 3.943, 3.954, 3.964, 3.973,
                                   3.983, 3.992],
("nemenyi", "0.1"): [0, 0, 1.644854, 2.052293, 2.291341, 2.459516,
2.588521, 2.692732, 2.779884, 2.854606, 2.919889,
2.977768, 3.029694, 3.076733, 3.119693, 3.159199,
3.195743, 3.229723, 3.261461, 3.291224, 3.319233],
("bonferroni-dunn", "0.05"): [0, 0, 1.960, 2.241, 2.394, 2.498, 2.576,
2.638, 2.690, 2.724, 2.773],
("bonferroni-dunn", "0.1"): [0, 0, 1.645, 1.960, 2.128, 2.241, 2.326,
2.394, 2.450, 2.498, 2.539]}
q = d[(test, alpha)]
cd = q[k] * (k * (k + 1) / (6.0 * n)) ** 0.5
return cd
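    # Illustrative worked example (hypothetical numbers, added as a sketch): comparing
    # k = 7 oversampling strategies over n = 61 datasets at alpha = 0.05 gives q = 2.948,
    # so CD = 2.948 * sqrt(7 * 8 / (6 * 61)) ~= 1.15; two strategies are considered
    # significantly different only if their average ranks differ by more than ~1.15.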
def calcula_media_folds_biclasse(self, df):
t = pd.Series(data=np.arange(0, df.shape[0], 1))
dfr = pd.DataFrame(
columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER', 'ALPHA', 'PRE', 'REC', 'SPE', 'F1', 'GEO',
'IBA', 'AUC'],
index=np.arange(0, int(t.shape[0] / 5)))
df_temp = df.groupby(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = dfr.index.values
i = idx[0]
for name, group in df_temp:
group = group.reset_index()
dfr.at[i, 'MODE'] = group.loc[0, 'MODE']
mode = group.loc[0, 'MODE']
dfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
dfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
dfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
dfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
dfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
dfr.at[i, 'PRE'] = group['PRE'].mean()
dfr.at[i, 'REC'] = group['REC'].mean()
dfr.at[i, 'SPE'] = group['SPE'].mean()
dfr.at[i, 'F1'] = group['F1'].mean()
dfr.at[i, 'GEO'] = group['GEO'].mean()
dfr.at[i, 'IBA'] = group['IBA'].mean()
dfr.at[i, 'AUC'] = group['AUC'].mean()
i = i + 1
print(i)
dfr.to_csv(output_dir + 'resultado_media_biclasse_' + mode + '.csv', index=False)
def calcula_media_folds_multiclass(self, df):
t = pd.Series(data=np.arange(0, df.shape[0], 1))
dfr = pd.DataFrame(
columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER', 'ALPHA', 'PRE', 'REC', 'SPE', 'F1', 'GEO',
'IBA', 'AUC'],
index=np.arange(0, int(t.shape[0] / 5)))
df_temp = df.groupby(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = dfr.index.values
i = idx[0]
for name, group in df_temp:
group = group.reset_index()
dfr.at[i, 'MODE'] = group.loc[0, 'MODE']
mode = group.loc[0, 'MODE']
dfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
dfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
dfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
dfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
dfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
dfr.at[i, 'PRE'] = group['PRE'].mean()
dfr.at[i, 'REC'] = group['REC'].mean()
dfr.at[i, 'SPE'] = group['SPE'].mean()
dfr.at[i, 'F1'] = group['F1'].mean()
dfr.at[i, 'GEO'] = group['GEO'].mean()
dfr.at[i, 'IBA'] = group['IBA'].mean()
dfr.at[i, 'AUC'] = group['AUC'].mean()
i = i + 1
print(i)
dfr.to_csv(output_dir + 'resultado_media_multiclass_' + mode + '.csv', index=False)
def separa_delaunay_biclass(self, filename):
df = pd.read_csv(filename)
list_base = []
for p in np.arange(0, len(preproc_type)):
list_base.append(df[(df['PREPROC'] == preproc_type[p])])
df_base = list_base.pop(0)
for i in np.arange(0, len(list_base)):
df_base = pd.concat([df_base, list_base[i]], ignore_index=True)
for o in order:
for a in alphas:
dfr = df[(df['ORDER'] == o)]
dfr1 = dfr[(dfr['ALPHA'] == str(a))]
df_file = pd.concat([df_base, dfr1], ignore_index=True)
df_file.to_csv('./../output_dir/result_biclass' + '_' + o + '_' + str(a) + '.csv', index=False)
def read_dir_files(self, dir_name):
f = [f for f in listdir(dir_name) if isfile(join(dir_name, f))]
return f
def find_best_rank(self, results_dir, tipo):
results = self.read_dir_files(results_dir)
        df = pd.DataFrame(columns=['ARQUIVO', 'WINER'])
i = 0
for f in results:
df_temp = pd.read_csv(results_dir + f)
df.at[i, 'ARQUIVO'] = f
df.at[i, 'WINER'] = df_temp.iloc[0, 0]
i += 1
df.to_csv(output_dir + tipo)
def find_best_delaunay(self, results_dir, tipo):
df = pd.read_csv(results_dir + tipo)
i = 0
j = 0
df_best = pd.DataFrame(columns=['ARQUIVO', 'WINER'])
win = list(df['WINER'])
for w in win:
if w == 'DELAUNAY':
df_best.at[i, 'ARQUIVO'] = df.iloc[j, 1]
df_best.at[i, 'WINER'] = df.iloc[j, 2]
i += 1
j += 1
df_best.to_csv(output_dir + 'only_best_delaunay_pca_biclass_media_rank.csv')
def rank_by_algorithm(self, df, tipo, wd, reducao, order, alpha):
        '''
        Compute, for each classifier, the per-dataset ranking of every oversampling
        method (original, SMOTE variants, Geometric SMOTE and Delaunay/DTO) on each
        metric, save the rank tables and plot the critical-difference diagrams.
        :param df: dataframe of results averaged over folds
        :param tipo: experiment label used in the output file names (e.g. 'biclasse')
        :param wd: working directory where rank files and figures are written
        :param reducao: dimensionality-reduction label used in file names (e.g. 'pca')
        :param order: Delaunay geometry criterion (e.g. 'area', 'volume', ...)
        :param alpha: Delaunay alpha parameter
        :return:
        '''
df_tabela = pd.DataFrame(
columns=['DATASET', 'ALGORITHM', 'ORIGINAL', 'RANK_ORIGINAL', 'SMOTE', 'RANK_SMOTE', 'SMOTE_SVM',
'RANK_SMOTE_SVM', 'BORDERLINE1', 'RANK_BORDERLINE1', 'BORDERLINE2', 'RANK_BORDERLINE2',
'GEOMETRIC_SMOTE', 'RANK_GEOMETRIC_SMOTE',
'DELAUNAY', 'RANK_DELAUNAY', 'DELAUNAY_TYPE', 'ALPHA', 'unit'])
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
df.to_csv(dir_pca_biclasse + reducao + '_' + tipo + '_' + order + '_' + str(alpha) + '.csv')
j = 0
for d in dataset_list_bi:
for m in metricas_biclasse:
aux = group[group['DATASET'] == d]
aux = aux.reset_index()
df_tabela.at[j, 'DATASET'] = d
df_tabela.at[j, 'ALGORITHM'] = name
indice = aux.PREPROC[aux.PREPROC == '_train'].index.tolist()[0]
df_tabela.at[j, 'ORIGINAL'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_SMOTE'].index.tolist()[0]
df_tabela.at[j, 'SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_smoteSVM'].index.tolist()[0]
df_tabela.at[j, 'SMOTE_SVM'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline1'].index.tolist()[0]
df_tabela.at[j, 'BORDERLINE1'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline2'].index.tolist()[0]
df_tabela.at[j, 'BORDERLINE2'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Geometric_SMOTE'].index.tolist()[0]
df_tabela.at[j, 'GEOMETRIC_SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.ORDER == order].index.tolist()[0]
df_tabela.at[j, 'DELAUNAY'] = aux.at[indice, m]
df_tabela.at[j, 'DELAUNAY_TYPE'] = order
df_tabela.at[j, 'ALPHA'] = alpha
df_tabela.at[j, 'unit'] = m
j += 1
df_pre = df_tabela[df_tabela['unit'] == 'PRE']
df_rec = df_tabela[df_tabela['unit'] == 'REC']
df_spe = df_tabela[df_tabela['unit'] == 'SPE']
df_f1 = df_tabela[df_tabela['unit'] == 'F1']
df_geo = df_tabela[df_tabela['unit'] == 'GEO']
df_iba = df_tabela[df_tabela['unit'] == 'IBA']
df_auc = df_tabela[df_tabela['unit'] == 'AUC']
pre = df_pre[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
rec = df_rec[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
spe = df_spe[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
f1 = df_f1[['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
geo = df_geo[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
iba = df_iba[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
auc = df_auc[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
pre = pre.reset_index()
pre.drop('index', axis=1, inplace=True)
rec = rec.reset_index()
rec.drop('index', axis=1, inplace=True)
spe = spe.reset_index()
spe.drop('index', axis=1, inplace=True)
f1 = f1.reset_index()
f1.drop('index', axis=1, inplace=True)
geo = geo.reset_index()
geo.drop('index', axis=1, inplace=True)
iba = iba.reset_index()
iba.drop('index', axis=1, inplace=True)
auc = auc.reset_index()
auc.drop('index', axis=1, inplace=True)
            # compute the rank of each method, row by row (one row per dataset and metric)
pre_rank = pre.rank(axis=1, ascending=False)
rec_rank = rec.rank(axis=1, ascending=False)
spe_rank = spe.rank(axis=1, ascending=False)
f1_rank = f1.rank(axis=1, ascending=False)
geo_rank = geo.rank(axis=1, ascending=False)
iba_rank = iba.rank(axis=1, ascending=False)
auc_rank = auc.rank(axis=1, ascending=False)
df_pre = df_pre.reset_index()
df_pre.drop('index', axis=1, inplace=True)
df_pre['RANK_ORIGINAL'] = pre_rank['ORIGINAL']
df_pre['RANK_SMOTE'] = pre_rank['SMOTE']
df_pre['RANK_SMOTE_SVM'] = pre_rank['SMOTE_SVM']
df_pre['RANK_BORDERLINE1'] = pre_rank['BORDERLINE1']
df_pre['RANK_BORDERLINE2'] = pre_rank['BORDERLINE2']
df_pre['RANK_GEOMETRIC_SMOTE'] = pre_rank['GEOMETRIC_SMOTE']
df_pre['RANK_DELAUNAY'] = pre_rank['DELAUNAY']
df_rec = df_rec.reset_index()
df_rec.drop('index', axis=1, inplace=True)
df_rec['RANK_ORIGINAL'] = rec_rank['ORIGINAL']
df_rec['RANK_SMOTE'] = rec_rank['SMOTE']
df_rec['RANK_SMOTE_SVM'] = rec_rank['SMOTE_SVM']
df_rec['RANK_BORDERLINE1'] = rec_rank['BORDERLINE1']
df_rec['RANK_BORDERLINE2'] = rec_rank['BORDERLINE2']
df_rec['RANK_GEOMETRIC_SMOTE'] = rec_rank['GEOMETRIC_SMOTE']
df_rec['RANK_DELAUNAY'] = rec_rank['DELAUNAY']
df_spe = df_spe.reset_index()
df_spe.drop('index', axis=1, inplace=True)
df_spe['RANK_ORIGINAL'] = spe_rank['ORIGINAL']
df_spe['RANK_SMOTE'] = spe_rank['SMOTE']
df_spe['RANK_SMOTE_SVM'] = spe_rank['SMOTE_SVM']
df_spe['RANK_BORDERLINE1'] = spe_rank['BORDERLINE1']
df_spe['RANK_BORDERLINE2'] = spe_rank['BORDERLINE2']
df_spe['RANK_GEOMETRIC_SMOTE'] = spe_rank['GEOMETRIC_SMOTE']
df_spe['RANK_DELAUNAY'] = spe_rank['DELAUNAY']
df_f1 = df_f1.reset_index()
df_f1.drop('index', axis=1, inplace=True)
df_f1['RANK_ORIGINAL'] = f1_rank['ORIGINAL']
df_f1['RANK_SMOTE'] = f1_rank['SMOTE']
df_f1['RANK_SMOTE_SVM'] = f1_rank['SMOTE_SVM']
df_f1['RANK_BORDERLINE1'] = f1_rank['BORDERLINE1']
df_f1['RANK_BORDERLINE2'] = f1_rank['BORDERLINE2']
df_f1['RANK_GEOMETRIC_SMOTE'] = f1_rank['GEOMETRIC_SMOTE']
df_f1['RANK_DELAUNAY'] = f1_rank['DELAUNAY']
df_geo = df_geo.reset_index()
df_geo.drop('index', axis=1, inplace=True)
df_geo['RANK_ORIGINAL'] = geo_rank['ORIGINAL']
df_geo['RANK_SMOTE'] = geo_rank['SMOTE']
df_geo['RANK_SMOTE_SVM'] = geo_rank['SMOTE_SVM']
df_geo['RANK_BORDERLINE1'] = geo_rank['BORDERLINE1']
df_geo['RANK_BORDERLINE2'] = geo_rank['BORDERLINE2']
df_geo['RANK_GEOMETRIC_SMOTE'] = geo_rank['GEOMETRIC_SMOTE']
df_geo['RANK_DELAUNAY'] = geo_rank['DELAUNAY']
df_iba = df_iba.reset_index()
df_iba.drop('index', axis=1, inplace=True)
df_iba['RANK_ORIGINAL'] = iba_rank['ORIGINAL']
df_iba['RANK_SMOTE'] = iba_rank['SMOTE']
df_iba['RANK_SMOTE_SVM'] = iba_rank['SMOTE_SVM']
df_iba['RANK_BORDERLINE1'] = iba_rank['BORDERLINE1']
df_iba['RANK_BORDERLINE2'] = iba_rank['BORDERLINE2']
df_iba['RANK_GEOMETRIC_SMOTE'] = iba_rank['GEOMETRIC_SMOTE']
df_iba['RANK_DELAUNAY'] = iba_rank['DELAUNAY']
df_auc = df_auc.reset_index()
df_auc.drop('index', axis=1, inplace=True)
df_auc['RANK_ORIGINAL'] = auc_rank['ORIGINAL']
df_auc['RANK_SMOTE'] = auc_rank['SMOTE']
df_auc['RANK_SMOTE_SVM'] = auc_rank['SMOTE_SVM']
df_auc['RANK_BORDERLINE1'] = auc_rank['BORDERLINE1']
df_auc['RANK_BORDERLINE2'] = auc_rank['BORDERLINE2']
df_auc['RANK_GEOMETRIC_SMOTE'] = auc_rank['GEOMETRIC_SMOTE']
df_auc['RANK_DELAUNAY'] = auc_rank['DELAUNAY']
            # average rank across datasets
media_pre_rank = pre_rank.mean(axis=0)
media_rec_rank = rec_rank.mean(axis=0)
media_spe_rank = spe_rank.mean(axis=0)
media_f1_rank = f1_rank.mean(axis=0)
media_geo_rank = geo_rank.mean(axis=0)
media_iba_rank = iba_rank.mean(axis=0)
media_auc_rank = auc_rank.mean(axis=0)
media_pre_rank_file = media_pre_rank.reset_index()
media_pre_rank_file = media_pre_rank_file.sort_values(by=0)
media_rec_rank_file = media_rec_rank.reset_index()
media_rec_rank_file = media_rec_rank_file.sort_values(by=0)
media_spe_rank_file = media_spe_rank.reset_index()
media_spe_rank_file = media_spe_rank_file.sort_values(by=0)
media_f1_rank_file = media_f1_rank.reset_index()
media_f1_rank_file = media_f1_rank_file.sort_values(by=0)
media_geo_rank_file = media_geo_rank.reset_index()
media_geo_rank_file = media_geo_rank_file.sort_values(by=0)
media_iba_rank_file = media_iba_rank.reset_index()
media_iba_rank_file = media_iba_rank_file.sort_values(by=0)
media_auc_rank_file = media_auc_rank.reset_index()
media_auc_rank_file = media_auc_rank_file.sort_values(by=0)
            # Save the main result files
df_pre.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv',
index=False)
df_rec.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv',
index=False)
df_spe.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv',
index=False)
df_f1.to_csv(wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv',
index=False)
df_geo.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv',
index=False)
df_iba.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv',
index=False)
df_auc.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
media_pre_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv',
index=False)
media_rec_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv',
index=False)
media_spe_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv',
index=False)
media_f1_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv',
index=False)
media_geo_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv',
index=False)
media_iba_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv',
index=False)
media_auc_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
delaunay_type = order + '_' + str(alpha)
            # critical-difference (CD) diagrams
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
delaunay_type]
avranks = list(media_pre_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_pre.pdf')
plt.close()
avranks = list(media_rec_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_rec.pdf')
plt.close()
avranks = list(media_spe_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_spe.pdf')
plt.close()
avranks = list(media_f1_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_f1.pdf')
plt.close()
avranks = list(media_geo_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_geo.pdf')
plt.close()
avranks = list(media_iba_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_iba.pdf')
plt.close()
'''avranks = list(media_auc_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_auc.pdf')
plt.close()'''
print('Delaunay Type= ', delaunay_type)
print('Algorithm= ', name)
def rank_total_by_algorithm(self, tipo, wd, reducao, order, alpha):
delaunay_name = 'RANK_DTO_' + str(order) + '_' + str(alpha)
cols = ['ALGORITHM', 'RANK_ORIGINAL', 'RANK_SMOTE', 'RANK_SMOTE_SVM', 'RANK_BORDERLINE1',
'RANK_BORDERLINE2', 'RANK_GEOMETRIC_SMOTE', 'RANK_DELAUNAY']
for name in classifiers_list:
print(os.path.abspath(os.getcwd()))
            # Read the per-metric total-rank files saved earlier
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_pre.csv'
df_pre = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_rec.csv'
df_rec = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_spe.csv'
df_spe = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_f1.csv'
df_f1 = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_geo.csv'
df_geo = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_iba.csv'
df_iba = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_auc.csv'
df_auc = pd.read_csv(path_name)
# PRE
df_pre_col = df_pre[cols]
df_pre_col.loc[:, delaunay_name] = df_pre_col['RANK_DELAUNAY'].values
df_pre_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_pre = df_pre_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv'
ranking_pre['ALGORITHM'] = name
ranking_pre.to_csv(path_name, index=False)
# REC
df_rec_col = df_rec[cols]
df_rec_col.loc[:, delaunay_name] = df_rec_col['RANK_DELAUNAY'].values
df_rec_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_rec = df_rec_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv'
ranking_rec['ALGORITHM'] = name
ranking_rec.to_csv(path_name, index=False)
# SPE
df_spe_col = df_spe[cols]
df_spe_col.loc[:, delaunay_name] = df_spe_col['RANK_DELAUNAY'].values
df_spe_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_spe = df_spe_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv'
ranking_spe['ALGORITHM'] = name
ranking_spe.to_csv(path_name, index=False)
# F1
df_f1_col = df_f1[cols]
df_f1_col.loc[:, delaunay_name] = df_f1_col['RANK_DELAUNAY'].values
df_f1_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_f1 = df_f1_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv'
ranking_f1['ALGORITHM'] = name
ranking_f1.to_csv(path_name, index=False)
# GEO
df_geo_col = df_geo[cols]
df_geo_col.loc[:, delaunay_name] = df_geo_col['RANK_DELAUNAY'].values
df_geo_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_geo = df_geo_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv'
ranking_geo['ALGORITHM'] = name
ranking_geo.to_csv(path_name, index=False)
# IBA
df_iba_col = df_iba[cols]
df_iba_col.loc[:, delaunay_name] = df_iba_col['RANK_DELAUNAY'].values
df_iba_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_iba = df_iba_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv'
ranking_iba['ALGORITHM'] = name
ranking_iba.to_csv(path_name, index=False)
# AUC
df_auc_col = df_auc[cols]
df_auc_col.loc[:, delaunay_name] = df_auc_col['RANK_DELAUNAY'].values
df_auc_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_auc = df_auc_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv'
ranking_auc['ALGORITHM'] = name
ranking_auc.to_csv(path_name, index=False)
def rank_by_algorithm_dataset(self, filename):
df = pd.read_csv(filename)
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
df_temp1 = group.groupby(by=['DATASET'])
for name1, group1 in df_temp1:
group1 = group1.reset_index()
group1.drop('index', axis=1, inplace=True)
group1['rank_f1'] = group1['F1'].rank(ascending=False)
group1['rank_geo'] = group1['GEO'].rank(ascending=False)
group1['rank_iba'] = group1['IBA'].rank(ascending=False)
group1['rank_auc'] = group1['AUC'].rank(ascending=False)
group1.to_csv('./../output_dir/rank/rank_algorithm_dataset_' + name + '_' + name1 + '.csv', index=False)
def rank_by_algorithm_dataset_only_dto(self, filename):
df = pd.read_csv(filename)
df = df[df['PREPROC'] != '_SMOTE']
df = df[df['PREPROC'] != '_Geometric_SMOTE']
df = df[df['PREPROC'] != '_Borderline1']
df = df[df['PREPROC'] != '_Borderline2']
df = df[df['PREPROC'] != '_smoteSVM']
df = df[df['PREPROC'] != '_train']
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
df_temp1 = group.groupby(by=['DATASET'])
for name1, group1 in df_temp1:
group1 = group1.reset_index()
group1.drop('index', axis=1, inplace=True)
group1['rank_f1'] = group1['F1'].rank(ascending=False)
group1['rank_geo'] = group1['GEO'].rank(ascending=False)
group1['rank_iba'] = group1['IBA'].rank(ascending=False)
group1['rank_auc'] = group1['AUC'].rank(ascending=False)
group1.to_csv(
'./../output_dir/rank/only_dto/rank_algorithm_dataset_only_dto_' + name + '_' + name1 + '.csv',
index=False)
df_graph = group1.copy()
df_graph = df_graph.replace('area', 1)
df_graph = df_graph.replace('volume', 2)
df_graph = df_graph.replace('area_volume_ratio', 3)
df_graph = df_graph.replace('edge_ratio', 4)
df_graph = df_graph.replace('radius_ratio', 5)
df_graph = df_graph.replace('aspect_ratio', 6)
df_graph = df_graph.replace('max_solid_angle', 7)
df_graph = df_graph.replace('min_solid_angle', 8)
df_graph = df_graph.replace('solid_angle', 9)
legend = ['area', 'volume', 'area_volume_ratio', 'edge_ratio', 'radius_ratio', 'aspect_ratio',
'max_solid_angle', 'min_solid_angle', 'solid_angle']
x = df_graph['ORDER'].values
y = df_graph['ALPHA'].values.astype(float)
dz = df_graph['AUC'].values
N = x.shape[0]
z = np.zeros(N)
dx = 0.2 * np.ones(N)
dy = 0.2 * np.ones(N)
fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(111, projection='3d')
cs = ['r', 'g', 'b'] * 9
ax1.bar3d(x, y, z, dx, dy, dz, color=cs)
ax1.set_ylabel('Alpha')
ax1.set_xlabel('\n\n\n\n\nGeometry')
ax1.set_zlabel('AUC')
ax1.set_title('Geometry x Alpha \n Algorithm = ' + name + '\n Dataset = ' + name1)
ax1.set_xticklabels(legend)
ax1.legend()
plt.show()
fig = plt.figure(figsize=(12, 8))
ax = Axes3D(fig)
surf = ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.5)
fig.colorbar(surf, shrink=0.5, aspect=7)
ax.set_xlabel('Alpha')
ax.set_ylabel('\n\n\n\n\nGeometry')
ax.set_zlabel('AUC')
ax.set_title('Geometry x Alpha \n Algorithm = ' + name + '\n Dataset = ' + name1)
ax.set_yticklabels(legend)
ax.legend()
plt.savefig('./../output_dir/rank/only_dto/only_dto_geometry_by_alpha_' + name + '_' + name1 + '.pdf')
plt.show()
def rank_by_measures_only_dto(self, filename):
best_geometry = pd.DataFrame(columns=['PREPROC', 'M', 'ALGORITHM', 'MEDIA_RANK'])
df = pd.read_csv(filename)
df = df[df['PREPROC'] != '_SMOTE']
df = df[df['PREPROC'] != '_Geometric_SMOTE']
df = df[df['PREPROC'] != '_Borderline1']
df = df[df['PREPROC'] != '_Borderline2']
df = df[df['PREPROC'] != '_smoteSVM']
df = df[df['PREPROC'] != '_train']
i = 0
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
group['rank_f1'] = group['F1'].rank(ascending=False)
group['rank_geo'] = group['GEO'].rank(ascending=False)
group['rank_iba'] = group['IBA'].rank(ascending=False)
group['rank_auc'] = group['AUC'].rank(ascending=False)
# AUC
group = group.sort_values(by=['rank_auc'])
media_rank_auc = group.groupby('PREPROC')['rank_auc'].mean()
df_media_rank_auc = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_AUC'])
df_media_rank_auc['PREPROC'] = media_rank_auc.index
df_media_rank_auc['MEDIA_RANK_AUC'] = media_rank_auc.values
df_media_rank_auc.sort_values(by=['MEDIA_RANK_AUC'], ascending=True, inplace=True)
df_media_rank_auc.reset_index(inplace=True)
df_media_rank_auc.drop('index', axis=1, inplace=True)
best_auc_geometry = df_media_rank_auc.loc[0]
# GEO
group = group.sort_values(by=['rank_geo'])
media_rank_geo = group.groupby('PREPROC')['rank_geo'].mean()
df_media_rank_geo = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_GEO'])
df_media_rank_geo['PREPROC'] = media_rank_geo.index
df_media_rank_geo['MEDIA_RANK_GEO'] = media_rank_geo.values
df_media_rank_geo.sort_values(by=['MEDIA_RANK_GEO'], ascending=True, inplace=True)
df_media_rank_geo.reset_index(inplace=True)
df_media_rank_geo.drop('index', axis=1, inplace=True)
best_geo_geometry = df_media_rank_geo.loc[0]
# IBA
group = group.sort_values(by=['rank_iba'])
media_rank_iba = group.groupby('PREPROC')['rank_iba'].mean()
df_media_rank_iba = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_IBA'])
df_media_rank_iba['PREPROC'] = media_rank_iba.index
df_media_rank_iba['MEDIA_RANK_IBA'] = media_rank_iba.values
df_media_rank_iba.sort_values(by=['MEDIA_RANK_IBA'], ascending=True, inplace=True)
df_media_rank_iba.reset_index(inplace=True)
df_media_rank_iba.drop('index', axis=1, inplace=True)
best_iba_geometry = df_media_rank_iba.loc[0]
# F1
group = group.sort_values(by=['rank_f1'])
media_rank_f1 = group.groupby('PREPROC')['rank_f1'].mean()
df_media_rank_f1 = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_F1'])
df_media_rank_f1['PREPROC'] = media_rank_f1.index
df_media_rank_f1['MEDIA_RANK_F1'] = media_rank_f1.values
df_media_rank_f1.sort_values(by=['MEDIA_RANK_F1'], ascending=True, inplace=True)
df_media_rank_f1.reset_index(inplace=True)
df_media_rank_f1.drop('index', axis=1, inplace=True)
best_f1_geometry = df_media_rank_f1.loc[0]
best_geometry.loc[i + 0, 'PREPROC'] = best_auc_geometry[0]
best_geometry.loc[i + 0, 'MEDIA_RANK'] = best_auc_geometry[1]
best_geometry.loc[i + 0, 'ALGORITHM'] = name
best_geometry.loc[i + 0, 'M'] = 'AUC'
best_geometry.loc[i + 1, 'PREPROC'] = best_geo_geometry[0]
best_geometry.loc[i + 1, 'MEDIA_RANK'] = best_geo_geometry[1]
best_geometry.loc[i + 1, 'ALGORITHM'] = name
best_geometry.loc[i + 1, 'M'] = 'GEO'
best_geometry.loc[i + 2, 'PREPROC'] = best_iba_geometry[0]
best_geometry.loc[i + 2, 'MEDIA_RANK'] = best_iba_geometry[1]
best_geometry.loc[i + 2, 'ALGORITHM'] = name
best_geometry.loc[i + 2, 'M'] = 'IBA'
best_geometry.loc[i + 3, 'PREPROC'] = best_f1_geometry[0]
best_geometry.loc[i + 3, 'MEDIA_RANK'] = best_f1_geometry[1]
best_geometry.loc[i + 3, 'ALGORITHM'] = name
best_geometry.loc[i + 3, 'M'] = 'F1'
i += 4
group.to_csv('./../output_dir/rank/rank_by_measures' + '_' + name + '.csv', index=False)
best_geometry.to_csv('./../output_dir/rank/best_dto_geometry_rank.csv', index=False)
def find_best_dto(self):
'''
Find best DTO geometry and alpha parameter
:return:
'''
df = pd.read_csv('./../output_dir/rank/rank_by_measures.csv')
# AUC
best_dto_auc = df.groupby(['ORDER', 'ALPHA', 'ALGORITHM'])['rank_auc']
short = best_dto_auc.min().sort_values()
min_auc_rank = short[0]
df_min_auc = df[df['rank_auc'] == min_auc_rank]
number = Counter(df_min_auc['PREPROC'])
auc_choices = number.most_common()
# GEO
best_dto_geo = df.groupby(['ORDER', 'ALPHA', 'ALGORITHM'])['rank_geo']
short = best_dto_geo.min().sort_values()
min_geo_rank = short[0]
df_min_geo = df[df['rank_geo'] == min_geo_rank]
number = Counter(df_min_geo['PREPROC'])
geo_choices = number.most_common()
# IBA
best_dto_iba = df.groupby(['ORDER', 'ALPHA', 'ALGORITHM'])['rank_iba']
short = best_dto_iba.min().sort_values()
min_iba_rank = short[0]
df_min_iba = df[df['rank_iba'] == min_iba_rank]
number = Counter(df_min_iba['PREPROC'])
iba_choices = number.most_common()
# F1
best_dto_f1 = df.groupby(['ORDER', 'ALPHA', 'ALGORITHM'])['rank_f1']
short = best_dto_f1.min().sort_values()
min_f1_rank = short[0]
df_min_f1 = df[df['rank_f1'] == min_f1_rank]
number = Counter(df_min_f1['PREPROC'])
f1_choices = number.most_common()
d1 = {}
d2 = {}
d3 = {}
d4 = {}
d1.update(auc_choices)
d2.update(geo_choices)
d3.update(iba_choices)
d4.update(f1_choices)
print(auc_choices)
print(geo_choices)
print(iba_choices)
print(f1_choices)
x = np.arange(len(auc_choices))
fig = plt.figure(figsize=(12, 12))
fig.autofmt_xdate(rotation=90, ha='center')
x1 = np.arange(len(geo_choices))
ax = plt.subplot(111)
ax.bar(x1, d1.values(), width=0.2, color='b', align='center')
ax.bar(x1 - 0.2, d2.values(), width=0.2, color='g', align='center')
ax.bar(x1 - 0.4, d3.values(), width=0.2, color='r', align='center')
ax.bar(x1 - 0.6, d4.values(), width=0.2, color='c', align='center')
ax.legend(('AUC', 'GEO', 'IBA', 'F1'))
plt.xticks(x1, d1.keys(), rotation=90)
plt.title("Best DTO", fontsize=17)
plt.ylabel('FREQUENCY WON')
ax.grid(which='both')
plt.savefig('./../output_dir/rank/best_dto_geometry_alpha.pdf', dpi=200)
# plt.show()
plt.close()
def graficos(self):
order = ['area', 'volume', 'area_volume_ratio', 'edge_ratio', 'radius_ratio', 'aspect_ratio', 'max_solid_angle',
'min_solid_angle', 'solid_angle']
alpha = [1, 4, 9]
algorithm = ['RF', 'KNN', 'DTREE', 'GNB', 'LRG', 'ABC', 'MLP', 'QDA', 'SVM', 'SGD']
pref = 'pca_total_rank_biclasse_'
measures = ['auc', 'geo', 'iba', 'f1']
preproc = ["_train", "_SMOTE", "_Borderline1", "_Borderline2", "_smoteSVM", "_Geometric_SMOTE"]
dfrank = pd.DataFrame(columns=['ALGORITHM', 'UNIT', 'PREPROC', 'ALPHA', 'MEDIA_RANK_ORIGINAL', 'MEDIA_RANK_DTO',
'MEDIA_RANK_GEO_SMOTE',
'MEDIA_RANK_SMOTE', 'MEDIA_RANK_SMOTE_SVM', 'MEDIA_RANK_B1', 'MEDIA_RANK_B2'])
i = 0
for m in measures:
for o in order:
for a in alpha:
for alg in algorithm:
df = pd.read_csv(
'./../rank/pca_biclasse/' + pref + o + '_' + str(a) + '_' + alg + '_' + m + '.csv')
dfrank.loc[i, 'ALGORITHM'] = alg
dfrank.loc[i, 'UNIT'] = m
dfrank.loc[i, 'PREPROC'] = o
dfrank.loc[i, 'ALPHA'] = a
mro = df.RANK_ORIGINAL.mean()
mrdto = df.RANK_DELAUNAY.mean()
mrgeosmote = df.RANK_GEOMETRIC_SMOTE.mean()
mrs = df.RANK_SMOTE.mean()
mrssvm = df.RANK_SMOTE_SVM.mean()
mrbl1 = df.RANK_BORDERLINE1.mean()
mrbl2 = df.RANK_BORDERLINE2.mean()
dfrank.loc[i, 'MEDIA_RANK_ORIGINAL'] = mro
dfrank.loc[i, 'MEDIA_RANK_DTO'] = mrdto
dfrank.loc[i, 'MEDIA_RANK_GEO_SMOTE'] = mrgeosmote
dfrank.loc[i, 'MEDIA_RANK_SMOTE'] = mrs
dfrank.loc[i, 'MEDIA_RANK_SMOTE_SVM'] = mrssvm
dfrank.loc[i, 'MEDIA_RANK_B1'] = mrbl1
dfrank.loc[i, 'MEDIA_RANK_B2'] = mrbl2
i += 1
dfrank.to_csv('./../output_dir/media_rank_all_alpha_order.csv', index=False)
def rank_dto_by(self,geometry):
#M = ['_pre.csv', '_rec.csv', '_spe.csv', '_f1.csv', '_geo.csv','_iba.csv', '_auc.csv']
M = ['_pre.csv', '_rec.csv', '_spe.csv', '_f1.csv', '_geo.csv', '_iba.csv']
df_media_rank = pd.DataFrame(columns=['ALGORITHM', 'RANK_ORIGINAL', 'RANK_SMOTE',
'RANK_SMOTE_SVM', 'RANK_BORDERLINE1', 'RANK_BORDERLINE2',
'RANK_GEOMETRIC_SMOTE', 'RANK_DELAUNAY','unit'])
name = './../output_dir/biclass/total_rank/pca_total_rank_biclasse_' + geometry + '_'
for m in M:
i = 0
for c in classifiers_list:
                df = pd.read_csv(name + c + m)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 10:30:17 2018
@author: avelinojaver
"""
from tierpsy.features.tierpsy_features.summary_stats import get_summary_stats
from tierpsy.summary.helper import augment_data, add_trajectory_info
from tierpsy.helper.params import read_fps
from tierpsy.helper.misc import WLAB,print_flush
from tierpsy.analysis.split_fov.helper import was_fov_split
import pandas as pd
import pdb
#%%
def time_to_frame_nb(time_windows,time_units,fps,timestamp,fname):
"""
Converts the time windows to units of frame numbers (if they were defined in seconds).
It also defines the end frame of a window, if the index is set to -1 (end).
"""
if timestamp.empty:
return
from copy import deepcopy
time_windows_frames = deepcopy(time_windows)
if time_units == 'seconds':
assert fps!=-1
for iwin in range(len(time_windows_frames)):
for ilim in range(2):
if time_windows_frames[iwin][ilim]!=-1:
time_windows_frames[iwin][ilim] = round(time_windows_frames[iwin][ilim]*fps)
last_frame = timestamp.sort_values().iloc[-1]
for iwin in range(len(time_windows_frames)):
# If a window ends with -1, replace with the frame number of the last frame (or the start frame of the window+1 if window out of bounds)
if time_windows_frames[iwin][1]==-1:
time_windows_frames[iwin][1] = max(last_frame+1,time_windows_frames[iwin][0])
# If a window is out of bounds, print warning
if time_windows_frames[iwin][0]>last_frame:
print_flush('Warning: The start time of window {}/{} is out of bounds of file \'{}\'.'.format(iwin+1,len(time_windows_frames),fname))
return time_windows_frames
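# Quick illustration of the conversion above (hypothetical values, added as a sketch):
# with time_units='seconds' and fps=25, a window [0, 60] becomes [0, 1500] in frame
# numbers, and a window end of -1 is replaced by last_frame + 1 (or by the window
# start, if that is larger).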
def no_fps(time_units, fps, fname):
if time_units=='seconds' and fps==-1:
print_flush(
"""
Warning: The time windows were defined in seconds, but fps for file \'{}\' is unknown.
Define time windows in frame numbers instead.
""".format(fname)
)
return True
else:
return False
#%%
def read_data(fname, time_windows, time_units, fps, is_manual_index):
"""
Reads the timeseries_data and the blob_features for a given file within every time window.
return:
timeseries_data_list: list of timeseries_data for each time window (length of lists = number of windows)
blob_features_list: list of blob_features for each time window (length of lists = number of windows)
"""
# EM: If time_units=seconds and fps is not defined, then return None with warning of no fps.
# Make this check here, to avoid wasting time reading the file
    if no_fps(time_units, fps, fname):
return
    with pd.HDFStore(fname, 'r') as fid:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pprint
import config.settings
import config.strategy
from core.utility import chunk_trades, sharpe, drawdown
from multiprocessing_on_dill import Pool #, Process, Manager
from contextlib import closing
class accountCurve():
"""
Account curve object for Portfolio and Instrument.
Calculates the positions we want to be in, based on the volatility target.
"""
def __init__(self, portfolio, capital=500000, positions=None, panama_prices=None, nofx=False, portfolio_weights = 1, **kw):
self.portfolio = portfolio
self.nofx = nofx
self.weights = portfolio_weights
self.multiproc = kw.get('multiproc', True)
# If working on one instrument, put it in a list
if not isinstance(portfolio, list):
self.portfolio = [self.portfolio]
if isinstance(positions, pd.Series):
positions = positions.rename(self.portfolio[0].name)
self.capital = capital
self.panama = panama_prices
if positions is None:
self.positions = self.instrument_positions()
self.positions = self.positions.multiply(self.weights)
else:
self.positions = pd.DataFrame(positions)
# Reduce all our positions so that they fit inside our target volatility when combined.
self.positions = self.positions.multiply(self.vol_norm(),axis=0)
# If we run out of data (for example, if the data feed is stopped), hold position for 5 trading days and then close.
# chunk_trades() is a function that is designed to reduce the amount of trading (and hence cost)
self.positions = chunk_trades(self.positions).ffill(limit=5).fillna(0)
def __repr__(self):
"""
Returns a formatted list of statistics about the account curve.
"""
return pprint.pformat(self.stats_list())
def inst_calc(self):
"""Calculate all the things we need on all the instruments and cache it."""
try:
return self.memo_inst_calc
except:
if len(self.portfolio)>1 and self.multiproc:
with closing(Pool()) as pool:
self.memo_inst_calc = dict(pool.map(lambda x: (x.name, x.calculate()), self.portfolio))
else:
self.memo_inst_calc = dict(map(lambda x: (x.name, x.calculate()), self.portfolio))
return self.memo_inst_calc
def instrument_positions(self):
"""Position returned by the instrument objects, not the final position in the portfolio"""
try:
return self.memo_instrument_positions
except:
self.memo_instrument_positions = pd.DataFrame({k: v['position'] for k, v in self.inst_calc().items()})
return self.memo_instrument_positions
def rates(self):
"""
Returns a Series or DataFrame of exchange rates.
"""
if self.nofx==True:
return 1
try:
return self.memo_rates
except:
self.memo_rates = pd.DataFrame({k: v['rate'] for k, v in self.inst_calc().items()})
return self.memo_rates
def stats_list(self):
stats_list = ["sharpe",
"gross_sharpe",
"annual_vol",
"sortino",
"cap",
"avg_drawdown",
"worst_drawdown",
"time_in_drawdown",
"calmar",
"avg_return_to_drawdown"]
return {k: getattr(self, k)() for k in stats_list}
def returns(self):
"""
Returns a Series/Frame of net returns after commissions, spreads and estimated slippage.
"""
return self.position_returns() + self.transaction_returns() + self.commissions() + self.spreads()
def position_returns(self):
"""The returns from holding the portfolio we had yesterday"""
# We shift back 2, as self.positions is the frontier - tomorrow's ideal position.
return (self.positions.shift(2).multiply((self.panama_prices()).diff(), axis=0).fillna(0) * self.point_values()) * self.rates()
def transaction_returns(self):
"""Estimated returns from transactions including slippage. Uses the average settlement price of the last two days"""
# self.positions.diff().shift(1) = today's trades
slippage_multiplier = .5
return (self.positions.diff().shift(1).multiply((self.panama_prices()).diff()*slippage_multiplier, axis=0).fillna(0) * self.point_values()) * self.rates()
def commissions(self):
commissions = pd.Series({v.name: v.commission for v in self.portfolio})
return (self.positions.diff().shift(1).multiply(commissions)).fillna(0).abs()*-1
def spreads(self):
spreads = pd.Series({v.name: v.spread for v in self.portfolio})
return (self.positions.diff().shift(1).multiply(spreads * self.point_values() * self.rates())).fillna(0).abs()*-1
def vol_norm(self):
return (config.strategy.daily_volatility_target * self.capital / \
(self.returns().sum(axis=1).shift(2).ewm(span=50).std())).clip(0,1.5)
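    # Illustration of the scaling above (hypothetical numbers, added as a sketch): with a
    # daily_volatility_target of 0.002 and capital of 500,000 the target daily P&L
    # volatility is 1,000; if the 50-span EWM std of summed returns is 2,000, positions
    # are scaled by 0.5, and the multiplier is always clipped to the range [0, 1.5].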
def panama_prices(self):
if self.panama is not None:
return pd.DataFrame(self.panama)
else:
try:
return self.memo_panama_prices
except:
self.memo_panama_prices = pd.DataFrame({k: v['panama_prices'] for k, v in self.inst_calc().items()})
return self.memo_panama_prices
def point_values(self):
        return pd.Series({v.name: v.point_value for v in self.portfolio})
"""Preprocessing data methods."""
import numpy as np
import pandas as pd
from autots.tools.impute import FillNA
def remove_outliers(df, std_threshold: float = 3):
"""Replace outliers with np.nan.
https://stackoverflow.com/questions/23199796/detect-and-exclude-outliers-in-pandas-data-frame
Args:
df (pandas.DataFrame): DataFrame containing numeric data, DatetimeIndex
std_threshold (float): The number of standard deviations away from mean to count as outlier.
"""
df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]
return df
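# Illustrative usage (hypothetical data, added as a sketch):
# df = pd.DataFrame({"x": [10.0] * 30 + [500.0]})
# remove_outliers(df) keeps the 10.0 values and replaces the single 500.0 with np.nan,
# because only the 500.0 lies more than 3 standard deviations from the column mean.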
def clip_outliers(df, std_threshold: float = 3):
"""Replace outliers above threshold with that threshold. Axis = 0.
Args:
df (pandas.DataFrame): DataFrame containing numeric data
std_threshold (float): The number of standard deviations away from mean to count as outlier.
"""
df_std = df.std(axis=0, skipna=True)
df_mean = df.mean(axis=0, skipna=True)
lower = df_mean - (df_std * std_threshold)
upper = df_mean + (df_std * std_threshold)
df2 = df.clip(lower=lower, upper=upper, axis=1)
return df2
def simple_context_slicer(df, method: str = 'None', forecast_length: int = 30):
"""Condensed version of context_slicer with more limited options.
Args:
df (pandas.DataFrame): training data frame to slice
method (str): Option to slice dataframe
'None' - return unaltered dataframe
'HalfMax' - return half of dataframe
'ForecastLength' - return dataframe equal to length of forecast
'2ForecastLength' - return dataframe equal to twice length of forecast
(also takes 4, 6, 8, 10 in addition to 2)
"""
if method in [None, "None"]:
return df
df = df.sort_index(ascending=True)
if 'forecastlength' in str(method).lower():
        len_int = int(''.join(x for x in str(method) if x.isdigit()))
return df.tail(len_int * forecast_length)
elif method == 'HalfMax':
return df.tail(int(len(df.index) / 2))
elif str(method).isdigit():
return df.tail(int(method))
else:
print("Context Slicer Method not recognized")
return df
"""
if method == '2ForecastLength':
return df.tail(2 * forecast_length)
elif method == '6ForecastLength':
return df.tail(6 * forecast_length)
elif method == '12ForecastLength':
return df.tail(12 * forecast_length)
elif method == 'ForecastLength':
return df.tail(forecast_length)
elif method == '4ForecastLength':
return df.tail(4 * forecast_length)
elif method == '8ForecastLength':
return df.tail(8 * forecast_length)
elif method == '10ForecastLength':
return df.tail(10 * forecast_length)
"""
class Detrend(object):
"""Remove a linear trend from the data."""
def __init__(self):
self.name = 'Detrend'
def fit(self, df):
"""Fits trend for later detrending.
Args:
df (pandas.DataFrame): input dataframe
"""
from statsmodels.regression.linear_model import GLS
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
# formerly df.index.astype( int ).values
y = df.values
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
# from statsmodels.tools import add_constant
# X = add_constant(X, has_constant='add')
self.model = GLS(y, X, missing='drop').fit()
self.shape = df.shape
return self
def fit_transform(self, df):
"""Fit and Return Detrended DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Return detrended data.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
# formerly X = df.index.astype( int ).values
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
# from statsmodels.tools import add_constant
# X = add_constant(X, has_constant='add')
df = df.astype(float) - self.model.predict(X)
return df
def inverse_transform(self, df):
"""Return data to original form.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
# from statsmodels.tools import add_constant
# X = add_constant(X, has_constant='add')
df = df.astype(float) + self.model.predict(X)
return df
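# Illustrative round trip with the class above (hypothetical usage, added as a sketch):
# detrender = Detrend()
# detrended = detrender.fit_transform(df)            # subtracts the fitted GLS trend
# restored = detrender.inverse_transform(detrended)  # adds the trend back, recovering df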
class StatsmodelsFilter(object):
"""Irreversible filters."""
def __init__(self, method: str = 'bkfilter'):
self.method = method
def fit(self, df):
"""Fits filter.
Args:
df (pandas.DataFrame): input dataframe
"""
return self
def fit_transform(self, df):
"""Fit and Return Detrended DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Return detrended data.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
if self.method == 'bkfilter':
from statsmodels.tsa.filters import bk_filter
cycles = bk_filter.bkfilter(df, K=1)
cycles.columns = df.columns
df = (df - cycles).fillna(method='ffill').fillna(method='bfill')
elif self.method == 'cffilter':
from statsmodels.tsa.filters import cf_filter
cycle, trend = cf_filter.cffilter(df)
cycle.columns = df.columns
df = df - cycle
return df
def inverse_transform(self, df):
"""Return data to original form.
Args:
df (pandas.DataFrame): input dataframe
"""
return df
class SinTrend(object):
"""Modelling sin."""
def __init__(self):
self.name = 'SinTrend'
def fit_sin(self, tt, yy):
"""Fit sin to the input time sequence, and return fitting parameters "amp", "omega", "phase", "offset", "freq", "period" and "fitfunc"
from user unsym @ https://stackoverflow.com/questions/16716302/how-do-i-fit-a-sine-curve-to-my-data-with-pylab-and-numpy
"""
import scipy.optimize
tt = np.array(tt)
yy = np.array(yy)
ff = np.fft.fftfreq(len(tt), (tt[1] - tt[0])) # assume uniform spacing
Fyy = abs(np.fft.fft(yy))
guess_freq = abs(
ff[np.argmax(Fyy[1:]) + 1]
) # excluding the zero frequency "peak", which is related to offset
guess_amp = np.std(yy) * 2.0 ** 0.5
guess_offset = np.mean(yy)
guess = np.array([guess_amp, 2.0 * np.pi * guess_freq, 0.0, guess_offset])
def sinfunc(t, A, w, p, c):
return A * np.sin(w * t + p) + c
popt, pcov = scipy.optimize.curve_fit(sinfunc, tt, yy, p0=guess, maxfev=10000)
A, w, p, c = popt
# f = w/(2.*np.pi)
# fitfunc = lambda t: A * np.sin(w*t + p) + c
return {
"amp": A,
"omega": w,
"phase": p,
"offset": c,
} # , "freq": f, "period": 1./f, "fitfunc": fitfunc, "maxcov": np.max(pcov), "rawres": (guess,popt,pcov)}
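    # Illustrative behaviour (hypothetical data, added as a sketch): for
    # tt = np.linspace(0, 20, 200) and yy = 2.0 * np.sin(0.5 * tt + 0.3) + 3.0,
    # fit_sin(tt, yy) should recover approximately amp ~ 2.0, omega ~ 0.5,
    # phase ~ 0.3 and offset ~ 3.0.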
def fit(self, df):
"""Fits trend for later detrending
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
        X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_yield_change_correlation [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_yield_change_correlation&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_yield_change_correlation).
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from arpym.statistics import meancov_sp
from arpym.estimation import cov_2_corr, min_corr_toeplitz
from arpym.tools import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_yield_change_correlation-parameters)
tau = np.arange(2, 10.25, 0.25) # times to maturity
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_yield_change_correlation-implementation-step00): Import data from database
path = '../../../databases/global-databases/fixed-income/db_yields/'
df = pd.read_csv(path + 'data.csv', index_col=0)
y = np.array(df.loc[:, tau.astype('str')])
y = y[1800:, ] # remove missing data
fx_df = pd.read_csv(path + 'data.csv', usecols=['dates'],
parse_dates=['dates'])
fx_df = fx_df[1801:]
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_yield_change_correlation-implementation-step01): Compute invariants
x = np.diff(y, n=1, axis=0)
t_, n_ = x.shape
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_yield_change_correlation-implementation-step02): Compute HFP mean, covariance, correlation and vector of standard deviations
m_hat_HFP_x, s2_hat_HFP_x = meancov_sp(x)
c2_HFP_x, s_vec = cov_2_corr(s2_hat_HFP_x)
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_yield_change_correlation-implementation-step03): Fit and compute the Toeplitz cross-diagonal form
c2_star, gamma_star = min_corr_toeplitz(c2_HFP_x, tau)
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_yield_change_correlation-implementation-step04): Save the data
# +
output = {
'tau': pd.Series(tau),
'n_': pd.Series(x.shape[1]),
'gamma_star': pd.Series(gamma_star),
'm_hat_HFP_x': pd.Series(m_hat_HFP_x),
's2_hat_HFP_x': pd.Series((s2_hat_HFP_x.reshape(-1))),
    's_vec': pd.Series(s_vec),
import keras
from keras.layers import Input, Conv1D, Activation, MaxPooling1D, UpSampling1D
from keras.models import Model
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sys
import argparse
# Constants
batch_size = 16
epochs = 50
# Window Function
def window(data, window_size): # With overlay
windowed_data = []
i = 0
while(i + window_size-1 < len(data)):
windowed_data.append(data[i:(i+window_size)])
i += window_size//2
    if((i - (window_size//2) + window_size) != len(data)):
i = len(data) - window_size
windowed_data.append(data[i:len(data)]) # add the rest
return windowed_data
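# Illustrative behaviour of window() (hypothetical input, added as a sketch):
# window(list(range(10)), 4) yields [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]],
# i.e. windows of length 4 with 50% overlap, plus one extra tail window whenever the
# last stride does not already end exactly at the end of the data.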
def parser_args(cmd_args):
parser = argparse.ArgumentParser(sys.argv[0], description="", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-e", "--exp", type=str, action="store", default="pairwise_distances", help="Experiment")
parser.add_argument("-d", "--dataset", type=str, action="store", default="PigArtPressure", help="Dataset name")
return parser.parse_args(cmd_args)
# obtaining arguments from command line
args = parser_args(sys.argv[1:])
dataset = args.dataset
exp = args.exp
if (exp == "pairwise_distances"):
data = np.genfromtxt('../data/' + exp + '/' + dataset + '.txt', delimiter=' ',)
print("Data shape:", data.shape)
elif (exp == "similarity_search"):
data = np.genfromtxt('../data/' + exp + '/' + dataset + '/' + 'Data.txt', delimiter=' ',)
print("Data shape:", data.shape)
else:
data = np.genfromtxt('../data/' + exp + '/' + dataset + '/' + dataset + '_train.txt', delimiter=' ',)
print("Data shape:", data.shape)
# Getting rid of the NaNs and infs with interpolation
if (len(data.shape) == 1):
    data = np.array(pd.Series(data))