prompt | completion | api
---|---|---
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 15:07:09 2016
@author: advena
"""
#import re
from datetime import datetime
#import numpy as np
import pandas as pd
import os
import sys
import shutil
from dateutil import parser
########################################################################
# User Inputs #
########################################################################
# Do not edit the PJM owner list (except to correct it)
pjm_owner_list=[202,205,209,212,215,222,225,320,345]
# Filter the data down to these owners.
# For PJM only, set compare_owner_list = pjm_owner_list
# For all units, set compare_owner_list = []
compare_owner_list = pjm_owner_list
#src_dir = r'K:\AFC_MODEL_UPDATES\2016_S\IDC Models'
src_dir = r'K:\AFC_MODEL_UPDATES\2016_S\IDC Models'
# Working directory. If src_dir != tgt_dir, the original raw
# files are copied to tgt_dir to prevent corruption of the originals.
# The copy will be modified; a new line will be added to the top
# to allow for csv parsing without error.
tgt_dir = r'C:\temp'
# The raw files to compare
raw_file1 = r'sum16idctr1p4_v32.RAW'
raw_file2 = r'sum16idctr1p6_v32.RAW'
# Maximum number of columns in any row, likely 28.
max_cols = 28 # set to zero to automatically determine.
# The regex pattern (before compiling) that identifies the header of a new section of the log file.
end_of_section_pattern="0 / "
########################################################################
# Function Definitions #
########################################################################
def max_col_cnt(filename):
'''
Finds the row with the max number of columns by counting commas
'''
max_commas = 0
# use a context manager so the file handle is closed after counting
with open(filename) as f:
    for line in f:
        max_commas = max(max_commas, line.count(','))
return max_commas + 1
def raw_to_df(src_dir, tgt_dir, filename, max_cols=28):
'''
src_dir: directory in which the raw files are located
tgt_dir: directory in which to copy the files
(to prevent corrupting originals)
filename: name of raw file (excluding path)
ins_hdr: True to add a generic header to the file (col1, col2, ...)
False if you already added a header to the file.
max_cols: The maximum number of columns in any row, likely 28.
'''
#create generic column headers
cols=["col"+str(i) for i in range(max_cols)]
#concatenate path and filename
src=os.path.join(src_dir,filename)
#copy both files to the target directory
if src_dir != tgt_dir and tgt_dir is not None and tgt_dir != '':
print(' copying raw file to working directory: ' + tgt_dir)
tgt=os.path.join(tgt_dir,filename)
shutil.copyfile(src, tgt)
else:
tgt=src
# return dataframe
print(' reading raw file into dataframe: ' + tgt_dir)
# read_csv already returns a DataFrame; pass the path so pandas manages the file handle
return pd.read_csv(tgt, names=cols, dtype=str)
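# Illustrative sketch (not part of the original script): one way the helpers above
# could be chained to load both RAW files defined in the user-input section.
# The wrapper name is hypothetical.
def _example_load_both_raw_files():
    n_cols = max_cols if max_cols else max_col_cnt(os.path.join(src_dir, raw_file1))
    df1 = raw_to_df(src_dir, tgt_dir, raw_file1, max_cols=n_cols)
    df2 = raw_to_df(src_dir, tgt_dir, raw_file2, max_cols=n_cols)
    return df1, df2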
def define_sections(df, end_of_section_pattern=end_of_section_pattern):
sections = []
first_row = 3
for row_num, row in df.iterrows():
if row[0][:4] == end_of_section_pattern:
#section_name = row[0][3:].replace("END OF","").strip()
section_name = row[0][11:]
#sections [from line, to line, section name]
sections += [[first_row, row_num, section_name]]
first_row = row_num+1
return sections
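# Hypothetical usage sketch: pull one named section out of the parsed dataframe using
# the [first_row, last_row, section_name] triples produced by define_sections().
def _example_get_section(df, sections, name_fragment):
    for first_row, last_row, section_name in sections:
        if name_fragment in str(section_name):
            return df.iloc[first_row:last_row].copy()
    return None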
def parse_raw_header(first_3_rows):
data = list(first_3_rows.iloc[0,:][0:7])+[None]
data[7] = data[6]
data[5] = data[5].split('/')[1][-4].strip()
data[5] = data[5].split('/')[0].strip()
data += [first_3_rows.iloc[1,:][0]]
data += [first_3_rows.iloc[2,:][0] + ',' + first_3_rows.iloc[2,:][1]]
for i in range(len(data)):
first_3_rows.iloc[0,i]=data[i]
data=[item.strip(' ') for item in data]
data[6]=parser.parse(data[6])
data[7]=parser.parse(data[7])
cols = ['col0','col1','col2','col3','col4','col5','Updated','Updated2',\
'Case_Name','Updated3']
dtype_dict = {'col0':'float','col1':'float','col2':'float','col3':'float',\
'col4':'float','col5':'str','Updated':'str','Updated2':'str',\
'Case_Name':'str','Updated3':'str'}
df = pd.DataFrame([data])
#print('raw summary:')
#print(df)
df.columns = cols
df = df.astype(dtype_dict)
return df
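# Hypothetical usage sketch: summarise the header of a parsed RAW file
# (df1 would be a dataframe returned by raw_to_df above):
#   header_df = parse_raw_header(df1.iloc[0:3, :])
#   print(header_df[['Case_Name', 'Updated']])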
def append_owner(df, owner_df):
''' Add owner information columns to a dataframe.
The input df only has an owner number. Look up the owner
details in owner_df (joining Owner to Owner_Num) and merge them on.
'''
ret = pd.merge(df, owner_df, left_on="Owner", right_on="Owner_Num", how="inner")
ret.drop('Owner_Num', axis=1, inplace=True)
return ret
def append_bus_info_to_branch(branch_df, bus_df):
''' Add Bus_Name columns to a Branch dataframe.
Branch1/2 only has bus numbers. Look up Bus_Name in the
Bus1 or Bus2 dataframe and apply
'''
bus_slim = bus_df.loc[:,['Bus_Num', 'Bus_Name']].copy()
# FROM bus
ret = pd.merge(branch_df, bus_slim, left_on="Fr_Bus_Num", right_on="Bus_Num", how="inner")
ret = ret.rename(columns = {'Bus_Name':'Fr_Bus_Name'})
ret.drop('Bus_Num', axis=1, inplace=True)
# TO bus
ret = pd.merge(ret, bus_slim, left_on="To_Bus_Num", right_on="Bus_Num", how="inner")
ret = ret.rename(columns = {'Bus_Name':'To_Bus_Name'})
ret.drop('Bus_Num', axis=1, inplace=True)
ret = ret[ret['Fr_Bus_Num'].notnull()]
return ret
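# Hypothetical helper (not in the original script): combine the two merge helpers
# above to build a branch table annotated with bus names and owner details.
def _example_enrich_branches(branch_df, bus_df, owner_df):
    return append_owner(append_bus_info_to_branch(branch_df, bus_df), owner_df)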
def branch_df_compare(branch_df1, branch_df2):
'''
branch_cols=['Fr_Bus_Num','To_Bus_Num','ID','Line_R_pu',\
'Line_X_pu','Charging_pu','Rate_A_MVA',\
'Rate_B_MVA','Rate_C_MVA','Line_G_From_pu',\
'Line_B_From_pu','Line_G_To_pu','Line_B_To_pu',\
'In_Service','Code','Length','Owner',\
'Fraction']
'''
# dropped branch
ret = | pd.merge(branch_df1, branch_df2, how='left', on=['Fr_Bus_Num','To_Bus_Num']) | pandas.merge |
# INSERT LICENSE
# This script contains statistical functions
import copy
import random
from collections import Counter
from typing import Optional, Union, Tuple
import navis
import navis.interfaces.neuprint as nvneu
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import ks_2samp
from tqdm import tqdm
import itertools
import seaborn as sns
from matplotlib.lines import Line2D
from neuroboom.utils import check_valid_neuron_input
from neuroboom import morphoelectro as nbm
def presynapse_focality(
x: Union[navis.TreeNeuron, navis.NeuronList],
heal_fragmented_neuron: bool = False,
confidence_threshold: tuple((float, float)) = (0.9, 0.9),
num_synapses_threshold: int = 1,
):
"""
Finds the connections that are downstream of 'x', where the presynapses of 'x' are focalised
Parameters
--------
x: A navis TreeNeuron or NeuronList whose presynaptic connections are analysed
heal_fragmented_neuron: bool
Whether to heal the neuron or not.
N.B. It's better to heal neurons during
import to save time in this function.
connector_confidence: tuple of floats
The confidence value used to threshold the synapses.
The first value (connector_confidence[0]) will be used to threshold presynapses
The second value (connector_confidence[1]) will be used to threshold postsynapses
num_synapses_threshold: int
The minimum number of synapses a partner must have
to be included in the permutation test
Returns
--------
synapse_connections: A dataframe detailing the presynaptic connections
df: A dataframe to be populated by the permutation test function
Examples
--------
"""
x = check_valid_neuron_input(x)
if heal_fragmented_neuron is True:
x = navis.heal_fragmented_neuron(x)
# Getting the connector table of synapses where x.id is the source
synapse_connections = navis.interfaces.neuprint.fetch_synapse_connections(
source_criteria=x.id
)
synapse_connections = synapse_connections.astype(object)
synapse_connections = nbm.match_connectors_to_nodes(synapse_connections,
x,
synapse_type='pre')
truth_list = [
True if len(np.unique(i)) > 1 else False
for i in synapse_connections.node.values
]
if synapse_connections[truth_list].shape[0] == 0:
synapse_connections.node = [
np.unique(k)[0] for k in synapse_connections.node.tolist()
]
else:
return "There are synapses associated with multiple nodes!!!!"
synapse_connections = synapse_connections[
synapse_connections.confidence_pre > confidence_threshold[0]
][synapse_connections.confidence_post > confidence_threshold[1]].copy()
count = Counter(synapse_connections.bodyId_post.tolist())
count = {
k: v for k, v in sorted(count.items(), key=lambda item: item[1], reverse=True)
}
truth_list = [
True if count[i] > num_synapses_threshold else False
for i in synapse_connections.bodyId_post
]
synapse_connections = synapse_connections[truth_list].copy()
df = pd.DataFrame()
df["partner_neuron"] = synapse_connections.bodyId_post.unique().tolist()
df["gT"] = ""
df["significance_val"] = ""
df["p_val"] = ""
df["num_syn"] = [count[i] for i in df.partner_neuron]
return (synapse_connections, df)
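# Hypothetical usage sketch (assumes a neuprint client is already configured and
# `neuron` is a healed navis.TreeNeuron fetched from neuprint):
#   syn_conn, results_df = presynapse_focality(neuron, confidence_threshold=(0.9, 0.9))
#   results_df.head()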
def postsynapse_focality(
x: Union[navis.TreeNeuron, navis.NeuronList],
heal_fragmented_neuron: bool = False,
split_neuron: bool = False,
confidence_threshold: tuple((float, float)) = (0.9, 0.9),
num_synapses_threshold: int = 1,
):
"""
Finds the connections that are upstream of 'x', where the postsynapses of 'x' are focalised
Parameters
--------
x: A navis TreeNeuron or NeuronList whose postsynaptic connections are analysed
heal_fragmented_neuron: bool
Whether to heal the neuron or not.
N.B. It's better to heal neurons during
import to save time in this function.
connector_confidence: tuple of floats
The confidence value used to threshold the synapses.
The first value (connector_confidence[0]) will be used to threshold presynapses
The second value (connector_confidence[1]) will be used to threshold postsynapses
num_synapses_threshold: int
The minimum number of synapses a partner must have
to be included in the permutation test
Returns
--------
synapse_connections: A dataframe detailing the postsynaptic connections
df: A dataframe to be populated by the permutation test function
Examples
--------
"""
x = check_valid_neuron_input(x)
if heal_fragmented_neuron is True:
x = navis.heal_fragmented_neuron(x)
# Getting the connector table of synapses where x.id is the target
synapse_connections = navis.interfaces.neuprint.fetch_synapse_connections(
target_criteria=x.id
)
synapse_connections = synapse_connections.astype(object)
synapse_connections = nbm.match_connectors_to_nodes(synapse_connections,
x,
synapse_type='post')
the_truth = [
True if len(np.unique(i)) > 1 else False
for i in synapse_connections.node.values
]
if synapse_connections[the_truth].shape[0] == 0:
synapse_connections.node = [
np.unique(k)[0] for k in synapse_connections.node.tolist()
]
else:
return "There are synapses associated with multiple nodes!!!!"
synapse_connections = synapse_connections[
synapse_connections.confidence_pre > confidence_threshold[0]
][synapse_connections.confidence_post > confidence_threshold[1]].copy()
count = Counter(synapse_connections.bodyId_pre.tolist())
count = {
k: v for k, v in sorted(count.items(), key=lambda item: item[1], reverse=True)
}
truth_list = [
True if count[i] > num_synapses_threshold else False
for i in synapse_connections.bodyId_pre
]
synapse_connections = synapse_connections[truth_list].copy()
df = pd.DataFrame()
df["partner_neuron"] = synapse_connections.bodyId_pre.unique().tolist()
df["gT"] = ""
df["significance_val"] = ""
df["p_val"] = ""
df["num_syn"] = [count[i] for i in df.partner_neuron]
return (synapse_connections, df)
def permut_test(
x: Union[navis.TreeNeuron, navis.NeuronList],
measuring_node: int,
synapse_connections: pd.DataFrame,
relation: str = "presyn",
num_iter: int = 10,
df: Optional = None,
count: Optional = None,
):
"""
Runs a permutation test on the geodesic distances for connections
Parameters
--------
x: navis.TreeNeuron
measuring_node: int
Node ID for which to measure the geodesic distance of synapses
synapse_connections: pandas.DataFrame
A DataFrame containing the synaptic connections
upon which the permutation test will be executed.
relation: str
Whether the synaptic connections included in
synapse_connections are presynapses or postsynapses
num_iter: int
Number of iterations to run the permutation test for
df: pandas.DataFrame
A DataFrame to record the p_values of the permutation test
count: dict
The total number of synaptic connections the
partner neuron has onto x
Returns
--------
df: A dataframe populated by the permutation test function
containing the p_values of the partners, determined by
the locations of their connections.
Examples
--------
"""
x = check_valid_neuron_input(x)
geo_mat = navis.geodesic_matrix(x, node_ids=measuring_node)
geo_mat = geo_mat.T
geo_mat.sort_values(by=[measuring_node], ascending=True, inplace=True)
for k, j in tqdm(enumerate(df.partner_neuron)):
if relation == "presyn":
total_distribution = geo_mat[
~geo_mat.index.isin(
synapse_connections[
synapse_connections.bodyId_post == j
].node.tolist()
)
]
specific_distribution = geo_mat[
geo_mat.index.isin(
synapse_connections[
synapse_connections.bodyId_post == j
].node.tolist()
)
]
elif relation == "postsyn":
total_distribution = geo_mat[
~geo_mat.index.isin(
synapse_connections[
synapse_connections.bodyId_pre == j
].node.tolist()
)
]
specific_distribution = geo_mat[
geo_mat.index.isin(
synapse_connections[
synapse_connections.bodyId_pre == j
].node.tolist()
)
]
total_mean = np.average(total_distribution.values)
specific_mean = np.average(specific_distribution.values)
gT = np.abs(total_mean - specific_mean)
df.iloc[k, 1] = gT
pV = list(total_distribution.values) + list(specific_distribution.values)
pV = [i[0] for i in pV]
pS = copy.copy(pV)
pD = []
for i in range(0, num_iter):
random.shuffle(pS)
pD.append(
np.abs(
np.average(pS[0: int(len(pS) / 2)])
- np.average(pS[int(len(pS) / 2):])
)
)
p_val = len(np.where(pD >= gT)[0]) / num_iter
df.iloc[k, 2] = p_val
ttest = stats.ttest_ind(total_distribution.values, specific_distribution.values)
df.iloc[k, 3] = ttest[1][0]
df.sort_values(by=["p_val"], ascending=True, inplace=True)
df.reset_index(inplace=True)
return df
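# Hypothetical usage sketch: feed the output of presynapse_focality() into the
# permutation test, measuring geodesic distances from the root node of `neuron`:
#   syn_conn, results_df = presynapse_focality(neuron)
#   results = permut_test(neuron, neuron.root[0], syn_conn,
#                         relation='presyn', num_iter=1000, df=results_df)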
def permutation_test_complete(
x: Union[navis.TreeNeuron, navis.NeuronList],
n_iter: int = 10,
remove_fragments: bool = True,
confidence_threshold: tuple = (0.9, 0.9)
):
"""
A wrapper function for the presynaptic and postsynaptic permutation test
functions, so that both can be performed with minimal code writing
by the user
Parameters
--------
x: navis.TreeNeuron
n_iter: int
Number of iterations to run the permutation test for
remove_fragments: bool
Whether to remove partners that are fragments/
have not been traced to completion/
do not have a soma.
Returns
--------
presyn_pt: A dataframe populated by the permutation test function
containing the p_values of the presynaptic partners,
determined by the locations of their connections.
postsyn_pt: A dataframe populated by the permutation test function
containing the p_values of the postsynaptic partners,
determined by the locations of their connections.
Examples
--------
"""
a_pre, a_df = presynapse_focality(
x, heal_fragmented_neuron=False, confidence_threshold=confidence_threshold
)
b_post, b_df = postsynapse_focality(
x, heal_fragmented_neuron=False, confidence_threshold=confidence_threshold
)
presyn_pt = permut_test(
x,
x.root[0],
synapse_connections=a_pre,
relation="presyn",
num_iter=n_iter,
df=a_df,
)
postsyn_pt = permut_test(
x,
x.root[0],
synapse_connections=b_post,
relation="postsyn",
num_iter=n_iter,
df=b_df,
)
a_partner_neurons, a_roi = nvneu.fetch_neurons(presyn_pt.partner_neuron.tolist())
b_partner_neurons, b_roi = nvneu.fetch_neurons(postsyn_pt.partner_neuron.tolist())
a_type_dict = dict(
zip(a_partner_neurons.bodyId.tolist(), a_partner_neurons.type.tolist())
)
b_type_dict = dict(
zip(b_partner_neurons.bodyId.tolist(), b_partner_neurons.type.tolist())
)
partner_dict = {**a_type_dict, **b_type_dict}
presyn_pt["partner_type"] = presyn_pt.partner_neuron.map(
partner_dict, na_action="ignore"
)
postsyn_pt["partner_type"] = postsyn_pt.partner_neuron.map(
partner_dict, na_action="ignore"
)
if remove_fragments:
presyn_pt = presyn_pt[~presyn_pt.partner_type.isnull()].copy()
postsyn_pt = postsyn_pt[~postsyn_pt.partner_type.isnull()].copy()
return (presyn_pt, postsyn_pt)
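# Hypothetical usage sketch: run both permutation tests and keep partners whose
# synapse placement is significant at an illustrative 0.05 cut-off:
#   presyn_pt, postsyn_pt = permutation_test_complete(neuron, n_iter=1000)
#   focal_downstream = presyn_pt[presyn_pt.p_val < 0.05]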
def prefocality_to_dendrogram_coloring(
x: pd.DataFrame,
p_val: float,
neuron: navis.TreeNeuron
):
"""
Function to take the results of synaptic focality tests and create colour dict for highlighting connectors
"""
x_thresh = x[x.p_val < p_val].copy()
partner_dict = dict(zip(x_thresh.partner_neuron, x_thresh.partner_type))
# fetching synapse connections
conn = nvneu.fetch_synapse_connections(source_criteria=neuron.id,
target_criteria=x_thresh.partner_neuron.tolist())
# filtering for high-confidence synapses
conn_thresh = conn[(conn.confidence_pre > 0.9) & (conn.confidence_post > 0.9)].copy()
pal = sns.color_palette('turbo', len(partner_dict))
pal_dict = dict(zip(partner_dict.keys(), pal))
nodes_matched = nbm.match_connectors_to_nodes(conn_thresh, neuron, synapse_type='pre')
c2n = dict(zip(nodes_matched.connector, nodes_matched.bodyId_post))
c2color = {i: pal_dict[c2n[i]] for i in c2n.keys()}
return(c2color, c2n, conn_thresh, partner_dict)
def postfocality_to_dendrogram_coloring(
x: pd.DataFrame,
p_val: float,
neuron: navis.TreeNeuron
):
"""
Function to take the results of synaptic focality tests and create colour dict for plotting
"""
x_thresh = x[x.p_val < p_val].copy()
partner_dict = dict(zip(x_thresh.partner_neuron, x_thresh.partner_type))
# fetching synapse connections
conn = nvneu.fetch_synapse_connections(target_criteria=neuron.id,
source_criteria=x_thresh.partner_neuron.tolist())
# filtering for high-confidence synapses
conn_thresh = conn[(conn.confidence_pre > 0.9) & (conn.confidence_post > 0.9)].copy()
pal = sns.color_palette('turbo', len(partner_dict))
pal_dict = dict(zip(partner_dict.keys(), pal))
nodes_matched = nbm.match_connectors_to_nodes(conn_thresh, neuron, synapse_type='post')
c2n = dict(zip(nodes_matched.connector, nodes_matched.bodyId_pre))
c2color = {i: pal_dict[c2n[i]] for i in c2n.keys()}
return(c2color, c2n, conn_thresh, partner_dict)
def make_legend_elements(connector_to_color, connector_to_neuron, partner_dict):
neuron_to_color = {connector_to_neuron[i]: connector_to_color[i] for i in connector_to_color.keys()}
legend_elements = []
for i in range(len(neuron_to_color)):
neuron = list(neuron_to_color.keys())[i]
legend_elements.append(Line2D([i], [0], marker='o',
color=neuron_to_color[neuron],
label=f'{neuron} : {partner_dict[neuron]}',
markerfacecolor=neuron_to_color[neuron],
markersize=60))
return(legend_elements)
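# Hypothetical usage sketch: colour connectors by significant presynaptic partners and
# attach a legend to an existing matplotlib Axes `ax`; the 0.05 cut-off is illustrative.
#   c2color, c2n, conn_thresh, partner_dict = prefocality_to_dendrogram_coloring(
#       presyn_pt, p_val=0.05, neuron=neuron)
#   legend_elements = make_legend_elements(c2color, c2n, partner_dict)
#   ax.legend(handles=legend_elements)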
#####################
## All By All synaptic Focality
#####################
# KS test
def synaptic_focality_KS_test(
x: navis.TreeNeuron,
synapse_type: str = 'pre',
confidence_threshold: Tuple = (0.9, 0.9)
):
if synapse_type == 'pre':
g_mat = navis.geodesic_matrix(x)
syn = nvneu.fetch_synapse_connections(source_criteria=x.id)
syn = syn[(syn.confidence_pre > confidence_threshold[0]) & (syn.confidence_post > confidence_threshold[1])].copy()
syn = nbm.match_connectors_to_nodes(syn, x, synapse_type=synapse_type)
df = pd.DataFrame()
df['partner_id'] = syn.bodyId_post.unique()
partner_gt = {}
partner_statistic = {}
partner_pval = {}
for i, j in enumerate(df.partner_id):
nodes = syn[syn.bodyId_post == j].node.tolist()
truth_array = np.isin(g_mat.index, nodes)
partner_geo_dist_vals = g_mat[truth_array].values.mean(axis=1)
total_geo_dist_vals = g_mat[~truth_array].values.mean(axis=1)
partner_gt[j] = partner_geo_dist_vals
KS_test = ks_2samp(partner_geo_dist_vals, total_geo_dist_vals)
partner_statistic[j] = KS_test.statistic
partner_pval[j] = KS_test.pvalue
df['gT'] = df.partner_id.map(partner_gt)
df['KS statistic'] = df.partner_id.map(partner_statistic)
df['KS pval'] = df.partner_id.map(partner_pval)
df['n_syn'] = [len(i) for i in df.gT]
elif synapse_type == 'post':
g_mat = navis.geodesic_matrix(x)
syn = nvneu.fetch_synapse_connections(target_criteria=x.id)
syn = syn[(syn.confidence_pre > confidence_threshold[0]) & (syn.confidence_post > confidence_threshold[1])].copy()
syn = nbm.match_connectors_to_nodes(syn, x, synapse_type=synapse_type)
df = pd.DataFrame()
df['partner_id'] = syn.bodyId_pre.unique()
partner_gt = {}
partner_statistic = {}
partner_pval = {}
for i, j in enumerate(df.partner_id):
nodes = syn[syn.bodyId_pre == j].node.tolist()
truth_array = np.isin(g_mat.index, nodes)
partner_geo_dist_vals = g_mat[truth_array].values.mean(axis=1)
total_geo_dist_vals = g_mat[~truth_array].values.mean(axis=1)
partner_gt[j] = partner_geo_dist_vals
KS_test = ks_2samp(partner_geo_dist_vals, total_geo_dist_vals)
partner_statistic[j] = KS_test.statistic
partner_pval[j] = KS_test.pvalue
df['gT'] = df.partner_id.map(partner_gt)
df['KS statistic'] = df.partner_id.map(partner_statistic)
df['KS pval'] = df.partner_id.map(partner_pval)
df['n_syn'] = [len(i) for i in df.gT]
return(df)
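# Hypothetical usage sketch: rank downstream partners by how unusual the geodesic
# placement of their synapses is under the KS test:
#   ks_df = synaptic_focality_KS_test(neuron, synapse_type='pre')
#   ks_df.sort_values('KS pval').head()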
# permutation (enrichment analysis)
def calculate_T_obs(
neuron_id: int,
neuron_to_node_dict: dict,
gmat: pd.DataFrame,
two_sample: bool = True):
Anodes_to_query = neuron_to_node_dict[neuron_id]
An = len(Anodes_to_query)
Bnodes_to_query = gmat.index[~np.isin(gmat.index, Anodes_to_query)].to_numpy()
Bn = len(Bnodes_to_query)
A_mean = gmat.loc[Anodes_to_query, :].mean().mean()
B_mean = gmat.loc[Bnodes_to_query, :].mean().mean()
T_obs = A_mean - B_mean
if two_sample:
T_obs = abs(T_obs)
return(T_obs, An, Bn)
# def random_draw_sample_dist(
# n_iter: int,
# gmat: pd.DataFrame,
# T_obs: float,
# An: int,
# Bn: int
# ):
#
# A_draws = []
# B_draws = []
#
# for i in range(n_iter):
#
# rc_A = np.random.choice(gmat.index.to_numpy(), size=An, replace=False)
# rc_B = np.random.choice(gmat.index.to_numpy(), size=Bn, replace=False)
#
# sample_mean_A = gmat.loc[rc_A, :].mean().mean()
# sample_mean_B = gmat.loc[rc_B, :].mean().mean()
#
# A_draws.append(sample_mean_A)
# B_draws.append(sample_mean_B)
#
# cart_prod = [i for i in itertools.product(A_draws, B_draws)]
#
# sampled_permut_diff = abs(np.array([i - j for i, j in cart_prod]))
#
# p_value = sum(sampled_permut_diff >= T_obs) / len(sampled_permut_diff)
#
# return(p_value)
def random_draw_sample_dist(
n_iter: int,
gmat: pd.DataFrame,
T_obs: float,
An: int,
Bn: int,
):
rc_A_lst = np.array([
np.random.choice(
gmat.index,
size = An,
replace = False) for i in range(n_iter)])
rc_B_lst = np.array([
np.random.choice(
gmat.index,
size = Bn,
replace = False) for i in range(n_iter)])
rc_A_lst = rc_A_lst - 1
rc_B_lst = rc_B_lst - 1
sample_means_A = [gmat.values[i, :].mean().mean() for i in rc_A_lst]
sample_means_B = [gmat.values[i, :].mean().mean() for i in rc_B_lst]
cart_prod = [i for i in itertools.product(sample_means_A, sample_means_B)]
sampled_permut_diff = abs(np.array([i - j for i, j in cart_prod]))
p_val = sum(sampled_permut_diff >= T_obs) / len(sampled_permut_diff)
return(p_val)
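# Hypothetical usage sketch: observed statistic plus permutation p-value for a single
# partner, given `neuron_to_uNodes` and `g_mat` as built in aba_presyn_focality() below:
#   T_obs, An, Bn = calculate_T_obs(partner_id, neuron_to_uNodes, g_mat)
#   p = random_draw_sample_dist(100, g_mat, T_obs, An, Bn)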
def aba_presyn_focality(
neuron: navis.TreeNeuron,
confidence_threshold: Tuple = (0.0, 0.0),
n_iter: int = 100,
syn_thresh: int = 1,
):
print('Fetching synaptic connections...')
syn = nvneu.fetch_synapse_connections(source_criteria=neuron.id)
print('Thresholding synapses by confidences...')
syn = syn[(syn.confidence_pre > confidence_threshold[0]) & (syn.confidence_post > confidence_threshold[1])].copy()
print('Thresholding synapses by synapse count...')
count_dict = dict(Counter(syn.bodyId_post).most_common())
syn = syn[[count_dict[i] > syn_thresh for i in syn.bodyId_post]].copy()
print('Matching connections to nodes...')
# syn_wmc = synaptic connections with connectors matched
syn_wmc = nbm.match_connectors_to_nodes(syn, neuron, synapse_type='pre')
connector2node = dict(zip(neuron.connectors.connector_id, neuron.connectors.node_id))
syn_wmc['node'] = syn_wmc.connector.map(connector2node).to_numpy()
unique_usns = syn_wmc.bodyId_post.unique()
neuron_to_uNodes = {i: syn_wmc[syn_wmc.bodyId_post == i].node.unique() for i in unique_usns}
print('Calculating all by all geodesic matrix for nodes...')
g_mat = navis.geodesic_matrix(neuron)
df = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2019 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import os
import time
import itertools
import tempfile
import requests
from quantrocket.master import download_master_file
from quantrocket.exceptions import ParameterError, NoHistoricalData, NoRealtimeData
from quantrocket.history import (
download_history_file,
get_db_config as get_history_db_config,
list_databases as list_history_databases)
from quantrocket.realtime import (
download_market_data_file,
get_db_config as get_realtime_db_config,
list_databases as list_realtime_databases)
from quantrocket.zipline import (
list_bundles,
get_bundle_config,
download_bundle_file)
TMP_DIR = os.environ.get("QUANTROCKET_TMP_DIR", tempfile.gettempdir())
def get_prices(codes, start_date=None, end_date=None,
universes=None, sids=None,
exclude_universes=None, exclude_sids=None,
times=None, fields=None,
timezone=None, infer_timezone=None,
cont_fut=None, data_frequency=None):
"""
Query one or more history databases, real-time aggregate databases,
and/or Zipline bundles and load prices into a DataFrame.
For bar sizes smaller than 1-day, the resulting DataFrame will have a MultiIndex
with levels (Field, Date, Time). For bar sizes of 1-day or larger, the MultiIndex
will have levels (Field, Date).
Parameters
----------
codes : str or list of str, required
the code(s) of one or more databases to query. If multiple databases
are specified, they must have the same bar size. List databases in order of
priority (highest priority first). If multiple databases provide the same
field for the same sid on the same datetime, the first database's value will
be used.
start_date : str (YYYY-MM-DD), optional
limit to data on or after this date
end_date : str (YYYY-MM-DD), optional
limit to data on or before this date
universes : list of str, optional
limit to these universes (default is to return all securities in database)
sids : list of str, optional
limit to these sids
exclude_universes : list of str, optional
exclude these universes
exclude_sids : list of str, optional
exclude these sids
times: list of str (HH:MM:SS), optional
limit to these times, specified in the timezone of the relevant exchange. See
additional information in the Notes section regarding the timezone to use.
fields : list of str, optional
only return these fields. (If querying multiple databases that have different fields,
provide the complete list of desired fields; only the supported fields for each
database will be queried.)
timezone : str, optional
convert timestamps to this timezone, for example America/New_York (see
`pytz.all_timezones` for choices); ignored for non-intraday bar sizes
infer_timezone : bool
infer the timezone from the securities master Timezone field; defaults to
True if using intraday bars and no `timezone` specified; ignored for
non-intraday bars, or if `timezone` is passed
cont_fut : str
stitch futures into continuous contracts using this method (default is not
to stitch together). Only applicable to history databases. Possible choices:
concat
data_frequency : str
for Zipline bundles, whether to query minute or daily data. If omitted,
defaults to minute data for minute bundles and to daily data for daily bundles.
This parameter only needs to be set to request daily data from a minute bundle.
Possible choices: daily, minute (or aliases d, m).
Returns
-------
DataFrame
a MultiIndex (Field, Date) or (Field, Date, Time) DataFrame of prices
Notes
-----
The `times` parameter, if provided, is applied differently for history databases and
Zipline bundles vs real-time aggregate databases. For history databases and Zipline
bundles, the parameter is applied when querying the database. For real-time aggregate
databases, the parameter is not applied when querying the database; rather, all available
times are retrieved and the `times` filter is applied to the resulting DataFrame after
casting it to the appropriate timezone (as inferred from the securities master Timezone
field or as explicitly specified with the `timezone` parameter). The rationale for this
behavior is that history databases and Zipline bundles store intraday data in the timezone
of the relevant exchange whereas real-time aggregate databases store data in UTC. By
applying the `times` filter as described, users can specify the times in the timezone of
the relevant exchange for both types of databases.
Examples
--------
Load intraday prices:
>>> prices = get_prices('stk-sample-5min', fields=["Close", "Volume"])
>>> prices.head()
Sid FIBBG1 FIBBG2
Field Date Time
Close 2017-07-26 09:30:00 153.62 2715.0
09:35:00 153.46 2730.0
09:40:00 153.21 2725.0
09:45:00 153.28 2725.0
09:50:00 153.18 2725.0
Isolate the closes:
>>> closes = prices.loc["Close"]
>>> closes.head()
Sid FIBBG1 FIBBG2
Date Time
2017-07-26 09:30:00 153.62 2715.0
09:35:00 153.46 2730.0
09:40:00 153.21 2725.0
09:45:00 153.28 2725.0
09:50:00 153.18 2725.0
Isolate the 15:45:00 prices:
>>> session_closes = closes.xs("15:45:00", level="Time")
>>> session_closes.head()
Sid FIBBG1 FIBBG2
Date
2017-07-26 153.29 2700.00
2017-07-27 150.10 2660.00
2017-07-28 149.43 2650.02
2017-07-31 148.99 2650.34
2017-08-01 149.72 2675.50
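Query daily data from a minute Zipline bundle (the bundle code shown is hypothetical):
>>> prices = get_prices('usstock-1min', data_frequency='daily', fields=['Close'])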
"""
# Import pandas lazily since it can take a moment to import
try:
import pandas as pd
except ImportError:
raise ImportError("pandas must be installed to use this function")
try:
import pytz
except ImportError:
raise ImportError("pytz must be installed to use this function")
if timezone and timezone not in pytz.all_timezones:
raise ParameterError(
"invalid timezone: {0} (see `pytz.all_timezones` for choices)".format(
timezone))
dbs = codes
if not isinstance(dbs, (list, tuple)):
dbs = [dbs]
fields = fields or []
if not isinstance(fields, (list, tuple)):
fields = [fields]
# separate history dbs from Zipline bundles from realtime dbs; in case one or
# more of the services is not running, we print a warning and try the other
# services
try:
history_dbs = set(list_history_databases())
except requests.HTTPError as e:
if e.response.status_code == 502:
import warnings
warnings.warn(
f"Error while checking if {', '.join(dbs)} is a history database, "
f"will assume it's not. Error was: {e}", RuntimeWarning)
history_dbs = set()
else:
raise
try:
realtime_dbs = list_realtime_databases()
except requests.HTTPError as e:
if e.response.status_code == 502:
import warnings
warnings.warn(
f"Error while checking if {', '.join(dbs)} is a realtime database, "
f"will assume it's not. Error was: {e}", RuntimeWarning)
realtime_dbs = {}
realtime_agg_dbs = set()
else:
raise
else:
realtime_agg_dbs = set(itertools.chain(*realtime_dbs.values()))
try:
zipline_bundles = set(list_bundles())
except requests.HTTPError as e:
if e.response.status_code == 502:
import warnings
warnings.warn(
f"Error while checking if {', '.join(dbs)} is a Zipline bundle, "
f"will assume it's not. Error was: {e}", RuntimeWarning)
zipline_bundles = set()
else:
raise
history_dbs.intersection_update(set(dbs))
realtime_agg_dbs.intersection_update(set(dbs))
zipline_bundles.intersection_update(set(dbs))
unknown_dbs = set(dbs) - history_dbs - realtime_agg_dbs - zipline_bundles
if unknown_dbs:
tick_dbs = set(realtime_dbs.keys()).intersection(unknown_dbs)
# Improve error message if possible
if tick_dbs:
raise ParameterError("{} is a real-time tick database, only history databases or "
"real-time aggregate databases are supported".format(
", ".join(tick_dbs)))
raise ParameterError(
"no history or real-time aggregate databases or Zipline bundles called {}".format(
", ".join(unknown_dbs)))
db_bar_sizes = set()
db_bar_sizes_parsed = set()
history_db_fields = {}
realtime_db_fields = {}
zipline_bundle_fields = {}
for db in history_dbs:
db_config = get_history_db_config(db)
bar_size = db_config.get("bar_size")
db_bar_sizes.add(bar_size)
# to validate uniform bar sizes, we need to parse them in case dbs
# store different but equivalent timedelta strings. History db
# strings may need massaging to be parsable.
if bar_size.endswith("s"):
# strip s from secs, mins, hours to get valid pandas timedelta
bar_size = bar_size[:-1]
elif bar_size == "1 week":
bar_size = "7 day"
elif bar_size == "1 month":
bar_size = "30 day"
db_bar_sizes_parsed.add(pd.Timedelta(bar_size))
history_db_fields[db] = db_config.get("fields", [])
for db in realtime_agg_dbs:
db_config = get_realtime_db_config(db)
bar_size = db_config.get("bar_size")
db_bar_sizes.add(bar_size)
db_bar_sizes_parsed.add( | pd.Timedelta(bar_size) | pandas.Timedelta |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
( | TS('2015-01-03') | pandas.Timestamp |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# test case will test matching sofr maturity with libor leg and flipping legs to get right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
from sklearn import datasets
from sklearn.datasets import load_breast_cancer
from tensorflow import keras
import pandas as pd
import numpy as np
from src.auxiliary_functions.auxiliary_functions import fd
def fetch_data_set(name: str, samples_per_class_synthetic: int = 100, noise_synthetic: float = 0.1):
"""
Loads the data sets.
Args:
name: str
Name of the data set.
data set name: # samples / # features / # classes
- 'abalone': 4067 / 10 / 16
- 'banknote': 1372 / 4 / 2
- 'cancer': 569 / 30 / 2
- 'digits': 1797 / 64 / 10
- 'htru2': 17898 / 8 / 2
- 'iris': 150 / 4 / 3
- 'madelon': 2600 / 500 / 2
- 'seeds': 210 / 7 / 3
- 'sonar': 208 / 60 / 2
- 'spam': 4601 / 57 / 2
- 'synthetic': 2 x samples_per_class_synthetic / 3 / 2
- 'voice': 126 / 310 / 2
- 'wine': 178 / 13 / 3
samples_per_class_synthetic: int, Optional
(Default is 100.)
noise_synthetic: float, Optional
(Default is 0.1.)
Returns:
X: np.ndarray
Data.
y: np.ndarray
Labels.
"""
if name == "abalone":
X, y = download_abalone()
elif name == 'banknote':
X, y = download_banknote()
elif name == 'cancer':
X, y = download_cancer()
elif name == 'digits':
X, y = download_digits()
elif name == 'htru2':
X, y = download_htru2()
elif name == 'iris':
X, y = download_iris()
elif name == 'madelon':
X, y = download_madelon()
elif name == 'sonar':
X, y = download_sonar()
elif name == 'spam':
X, y = download_spam()
elif name == 'synthetic':
X, y = create_synthetic_data(samples_per_class_synthetic, noise_synthetic)
elif name == 'seeds':
X, y = download_seeds()
elif name == 'voice':
X, y = download_voice()
elif name == 'wine':
X, y = download_wine()
else:
X, y = None, None
print("No valid data set was selected.")
return fd(X), fd(y)
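# Hedged usage sketch (not part of the original module): assumes the UCI/OpenML
# downloads defined below succeed and that fd() returns 2-D arrays, as its use here implies.
def _example_fetch_iris():
    """Fetch the iris data and print its dimensions (expected: (150, 4) and (150, 1))."""
    X, y = fetch_data_set('iris')
    print(X.shape, y.shape)
    return X, y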
def download_abalone():
"""
Downloads the 'abalone' data set, turns the 'Sex' category into three numerical features 'Male', 'Female', and
'Infant', and keeps only the samples with {5, 6, ..., 20} 'Rings', resulting in a data
set of 4067 samples with the 10 features 'Male', 'Female', 'Infant', 'Length', 'Diameter', 'Height', 'Whole weight',
'Shucked weight', 'Viscera weight', and 'Shell weight' and the label 'Rings'.
Returns:
X: np.array
Data.
y: np.array
Labels.
Data set information:
Predicting the age of abalone from physical measurements. The age of abalone is determined by cutting the shell
through the cone, staining it, and counting the number of rings through a microscope -- a boring and time-
consuming task. Other measurements, which are easier to obtain, are used to predict the age. Further
information, such as weather patterns and location (hence food availability) may be required to solve the
problem. From the original data examples with missing values were removed (the majority having the predicted
value missing), and the ranges of the continuous values have been scaled for use with an ANN (by dividing by
200).
Attribute information:
Given is the attribute name, attribute type, the measurement unit and a brief description. The number of rings
is the value to predict: either as a continuous value or as a classification problem.
Name / Data Type / Measurement Unit / Description
-----------------------------
Sex / nominal / -- / M, F, and I (infant)
Length / continuous / mm / Longest shell measurement
Diameter / continuous / mm / perpendicular to length
Height / continuous / mm / with meat in shell
Whole weight / continuous / grams / whole abalone
Shucked weight / continuous / grams / weight of meat
Viscera weight / continuous / grams / gut weight (after bleeding)
Shell weight / continuous / grams / after being dried
Rings / integer / -- / +1.5 gives the age in years
Class distribution:
Class Examples
----- --------
1 1
2 1
3 15
4 57
5 115
6 259
7 391
8 568
9 689
10 634
11 487
12 267
13 203
14 126
15 103
16 67
17 58
18 42
19 32
20 26
21 14
22 6
23 9
24 2
25 1
26 1
27 2
29 1
----- ----
Total 4177
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
dataset_path = keras.utils.get_file("abalone", "https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data")
column_names = ['Sex', 'Length', 'Diameter', 'Height', 'Whole weight',
'Shucked weight', 'Viscera weight', 'Shell weight', 'Rings']
dataset = pd.read_csv(dataset_path, names=column_names)
cleanup_nums = {"Sex": {"M": 1, "F": 2, "I": 3}}
dataset = dataset.replace(cleanup_nums)
dataset['Sex'] = dataset['Sex'].map({1: 'Male', 2: 'Female', 3: 'Infant'})
dataset = pd.get_dummies(dataset, prefix='', prefix_sep='')
dataset = dataset[['Male', 'Female', 'Infant', 'Length', 'Diameter', 'Height', 'Whole weight',
'Shucked weight', 'Viscera weight', 'Shell weight', 'Rings']]
dataset = dataset.to_numpy()
X = dataset[:, :-1]
y = fd(dataset[:, -1])
smaller = (y <= 20).flatten()
X = X[smaller, :]
y = y[smaller, :]
larger = (y >= 5).flatten()
X = X[larger, :]
y = y[larger, :]
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 4067, "Wrong number of samples."
assert X.shape[1] == 10, "Wrong number of features."
return X, y
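# Illustrative note (example values are made up): after the one-hot encoding and the
# column reordering above, an infant abalone of length 0.455 becomes a row starting
# [Male=0, Female=0, Infant=1, Length=0.455, ...] with 'Rings' kept as the label.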
def download_banknote():
"""
Downloads the 'banknote' data set, a data set of 1372 samples with 4 features 'Variance of wavelet transformed
image', 'Skewness of wavelet transformed image ','Curtosis of wavelet transformed image', and 'Entropy of image'.
The labels indicate whether a banknote is fake or not.
Returns:
X: np.array
Data.
y: np.array
Labels.
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
dataset_path = keras.utils.get_file("banknote", "https://archive.ics.uci.edu/ml/machine-learning-databases/00267/data_banknote_authentication.txt")
column_names = ['Variance of wavelet transformed image', 'Skewness of wavelet transformed image ',
'Curtosis of wavelet transformed image', 'Entropy of image', 'Class']
dataset = pd.read_csv(dataset_path, names=column_names)
dataset = dataset.to_numpy()
X = dataset[:, :-1]
y = fd(dataset[:, -1])
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 1372, "Wrong number of samples."
assert X.shape[1] == 4, "Wrong number of features."
return X, y
def download_cancer():
"""Downloads the 'cancer' data set. It consists of 569 samples of 30 features, which are used to predict whether a
tumor is benign or malignant.
Returns:
X: np.array
Data.
y: np.array
Labels.
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
X, y = load_breast_cancer(return_X_y=True)
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 569, "Wrong number of samples."
assert X.shape[1] == 30, "Wrong number of features."
return X, y
def download_digits():
"""
Downloads the 'digits' data set, a data set of 1797 samples with 64 features and 10 classes. The goal is to determine
the hand-written number corresponding to each sample.
Returns:
X: np.array
Data.
y: np.array
Labels.
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
digits = datasets.load_digits()
X = digits.data
y = digits.target
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 1797, "Wrong number of samples."
assert X.shape[1] == 64, "Wrong number of features."
return X, y
def download_htru2():
"""
Downloads the 'htru2' data set, a data set of 17898 samples with 8 features and 2 classes. Candidates must be
classified in to pulsar and non-pulsar classes to aid discovery.
htru2 is a data set which describes a sample of pulsar candidates collected during the High Time Resolution Universe
Survey (South) [1].
Returns:
X: np.array
Data.
y: np.array
Labels.
References:
[1] <NAME> et al., 'The High Time Resolution Universe Pulsar Survey - I. System Configuration and Initial
Discoveries',2010, Monthly Notices of the Royal Astronomical Society, vol. 409, pp. 619-627.
DOI: 10.1111/j.1365-2966.2010.17325.x
[2] <NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
try:
dataset = pd.read_csv("data_sets/HTRU_2/HTRU_2.csv", header=None, engine='python')
except FileNotFoundError:
dataset = pd.read_csv("../data_sets/HTRU_2/HTRU_2.csv", header=None, engine='python')
dataset = dataset.to_numpy()
X = dataset[:, :-1]
y = fd(dataset[:, -1])
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 17898, "Wrong number of samples."
assert X.shape[1] == 8, "Wrong number of features."
return X, y
def download_iris():
"""
Downloads the 'iris' data set, a data set of 150 samples with 4 features 'sepal length in cm', 'sepal width in cm',
'petal length in cm', and 'petal width in cm'. The goal is to determine to which of the three classes each sample
belongs.
Returns:
X: np.array
Data.
y: np.array
Labels.
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
iris = datasets.load_iris()
X = iris.data
y = iris.target
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 150, "Wrong number of samples."
assert X.shape[1] == 4, "Wrong number of features."
return X, y
def download_madelon():
"""
Downloads the training and validation samples of the 'madelon' data set, a binary classification data set
totalling 2600 samples with 500 features.
References:
[1] <NAME>, <NAME>, <NAME>, <NAME>, 2004. Result analysis of the NIPS 2003 feature
selection challenge. In: NIPS. [Web Link].
[2] <NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
path_X_train = keras.utils.get_file("madelon_train_data", "https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_train.data")
X_train = pd.read_csv(path_X_train, sep=" ", header=None).to_numpy()[:, :-1]
path_y_train = keras.utils.get_file("madelon_train_labels", "https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_train.labels")
y_train = pd.read_csv(path_y_train, sep=" ", header=None).to_numpy()
path_X_valid = keras.utils.get_file("madelon_valid_data", "https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_valid.data")
X_valid = pd.read_csv(path_X_valid, sep=" ", header=None).to_numpy()[:, :-1]
path_y_valid = keras.utils.get_file("madelon_valid_labels", "https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/madelon_valid.labels")
y_valid = pd.read_csv(path_y_valid, sep=" ", header=None).to_numpy()
X = np.vstack((X_train, X_valid))
y = np.vstack((y_train, y_valid))
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 2600, "Wrong number of samples."
assert X.shape[1] == 500, "Wrong number of features."
return X, y
def download_seeds():
"""
Downloads the 'seeds' data set, consisting of 7 input variables and 210 observations.
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
dataset_path = keras.utils.get_file("seeds", "https://archive.ics.uci.edu/ml/machine-learning-databases/00236/seeds_dataset.txt")
dataset = pd.read_csv(dataset_path, sep=r'\s+', header=None, engine='python')
dataset = dataset.to_numpy()
X = dataset[:, :-1]
y = fd(dataset[:, -1])
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 210, "Wrong number of samples."
assert X.shape[1] == 7, "Wrong number of features."
return X, y
def download_sonar():
"""
Downloads the 'sonar' data set, which consists of 208 samples of 60 features. The goal is to classify whether an
object is a rock or a mine.
References:
The data set was contributed to the benchmark collection by <NAME>, now at the Salk Institute and the
University of California at San Diego. The data set was developed in collaboration with <NAME> of
Allied-Signal Aerospace Technology Center.
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
dataset_path = keras.utils.get_file("sonar", "https://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data")
dataset = pd.read_csv(dataset_path, header=None)
dataset = dataset.replace(['M', 'R'], [0, 1])
dataset = dataset.to_numpy()
X = dataset[:, :-1]
y = fd(dataset[:, -1])
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 208, "Wrong number of samples."
assert X.shape[1] == 60, "Wrong number of features."
return X, y
def download_spam():
"""
Downloads the 'spam' spambase data set, a binary classification data set totalling 4601 samples with 57
features.
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
dataset_path = keras.utils.get_file("spam", "https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data")
dataset = pd.read_csv(dataset_path, header=None).to_numpy()
X = dataset[:, :-1]
y = dataset[:, -1]
assert X.shape[0] == y.shape[0], "Number of data points does not coincide with the number of labels."
assert X.shape[0] == 4601, "Wrong number of samples."
assert X.shape[1] == 57, "Wrong number of features."
return X, y
def create_synthetic_data(samples_per_class: int = 100, noise: float = 0.1):
"""
Creates a 'synthetic' data set with samples_per_class samples per class, perturbed by Gaussian noise.
Creates a synthetic data set consisting of two classes:
Samples of class 1 satisfy x_1^2 + 0.01 * x_2^2 + x_3^2 - 1 = 0.
Samples of class 2 satisfy x_1^2 + x_3^2 - 1.4 = 0.
Args:
samples_per_class: int, Optional
(Default is 100.)
noise: float, Optional
(Default is 0.1.)
Returns:
X: np.ndarray
y: np.ndarray
"""
# class 1
x_1 = np.random.random((samples_per_class, 1)) * 0.99
x_2 = np.random.random((samples_per_class, 1))
x_3 = np.sqrt(np.ones((samples_per_class, 1)) - x_1 ** 2 - 0.01 * x_2 ** 2)
X_1 = np.hstack((x_1, x_2, x_3))
y_1 = fd(np.zeros((samples_per_class, 1)))
assert (np.abs(
(X_1[:, 0] ** 2 + 0.01 * X_1[:, 1] ** 2 + X_1[:, 2] ** 2 - np.ones((samples_per_class, 1)))) <= 10e-10).all()
# class 2
x_1 = np.random.random((samples_per_class, 1))
x_2 = np.random.random((samples_per_class, 1))
x_3 = np.sqrt(1.4 * np.ones((samples_per_class, 1)) - x_1 ** 2)
X_2 = np.hstack((x_1, x_2, x_3))
y_2 = fd(np.ones((samples_per_class, 1)))
assert (np.abs((X_2[:, 0] ** 2 + X_2[:, 2] ** 2 - 1.4 * np.ones((samples_per_class, 1)))) <= 10e-10).all()
X = np.vstack((X_1, X_2))
y = np.vstack((y_1, y_2))
# add noise
noise_matrix = np.random.normal(0, 0.1, X.shape)
X = X + noise_matrix
# embed in R^10
# X = tmp_X.dot(np.random.normal(0, 0.5, (4, 10)))
# correlation_matrix = np.array([[1, 0, 0, 1, 0, 0, 1, 0, 0, 1],
# [0, 1, 0, 0, 1, 0, 0, 1, 0, 0],
# [0, 0, 1, 0, 0, 1, 0, 0, 1, 0],
# ])
correlation_matrix_tmp = np.random.normal(0, noise, (10, 10))
correlation_matrix = np.triu(correlation_matrix_tmp.dot(correlation_matrix_tmp.T) + np.identity(10))[:3, :10]
# print(correlation_matrix)
X = X.dot(correlation_matrix)
assert X.shape[1] == 10, "Wrong number of features."
return X, y
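# Hedged usage sketch (not in the original script): build a tiny synthetic set to
# show the returned shapes and the balanced labels after the R^10 embedding.
def _example_synthetic_data():
    X, y = create_synthetic_data(samples_per_class=5, noise=0.1)
    print(X.shape, y.shape)                  # expected: (10, 10) and (10, 1)
    print(np.unique(y, return_counts=True))  # expected: classes 0 and 1, 5 samples each
    return X, y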
def download_voice():
"""
Downloads the lsvt 'voice' rehabilitation data set, a data set of 126 samples with 310 features. Aim: assess whether
voice rehabilitation treatment lead to phonations considered 'acceptable' or 'unacceptable' (binary class
classification problem).
Returns:
X: np.array
Data.
y: np.array
Labels.
References:
<NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science.
"""
try:
X = pd.read_excel("data_sets/lsvt/LSVT_voice_rehabilitation.xlsx", sheet_name="Data")
"""
Script for plotting Figures 3, 5, 6
"""
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
import matplotlib.ticker as mtick
import seaborn as sns
from datetime import timedelta
import plotly.graph_objects as go
countries = ['Brazil', 'Canada', 'England', 'France', 'Germany', 'India', 'Japan', 'Scotland', 'USA', 'Wales']
samp_entropy_df = {}
for country in countries:
df = pd.read_csv(f'data/entropy/monthly/fast_samp_entropy_monthly_{country}.csv')
df['Date'] = pd.to_datetime(df['Date'])
df = df[(df['Date'] >= '03-01-2020') & (df['Date'] <= '06-30-2021')]
samp_entropy_df[country] = df
colors = {'France':'darkblue', 'Germany':'dodgerblue', 'Northern-Ireland':'yellowgreen',
'USA':'orange', 'Canada':'red'}
########## Fig 3
fig, ax = plt.subplots(1,2, figsize=(16,4), sharey=True)
fig.subplots_adjust(hspace=0.2, wspace=0.1)
for country in ['France', 'Germany']:
v = samp_entropy_df[country].iloc[:,1:37].sum(axis=0)
if country == 'Germany':
ax[0].plot(v, color = colors[country], label=country, alpha=0.5)
else:
ax[0].plot(v, color = colors[country], label=country)
ax[0].legend(bbox_to_anchor=(1, 1), loc=1, borderaxespad=0, fontsize=12)
ax[0].set_xticks(list(range(0,36,2)))
ax[0].set_xticklabels(list(range(1,37,2)), fontsize=12)
ax[0].set_xlabel('Dimension', fontsize=13, labelpad=10)
for country in ['USA', 'Canada']:
v = samp_entropy_df[country].iloc[:,1:37].sum(axis=0)
ax[1].plot(v, color = colors[country], label=country)
ax[1].legend(bbox_to_anchor=(1, 1), loc=1, borderaxespad=0, fontsize=12)
ax[1].set_xticks(list(range(0,36,2)))
ax[1].set_xticklabels(list(range(1,37,2)), fontsize=12)
ax[1].set_xlabel('Dimension', fontsize=13, labelpad=10)
fig.text(0.09, 0.5, 'Sum of Entropies', va='center', rotation='vertical', fontsize=13)
plt.suptitle("Sum of Sample Entropies for each dimension", fontsize=16)
fig.savefig("figures/Fig2.png", dpi=500, bbox_inches = 'tight')
plt.show()
########## Fig 5
def get_melted_df(df, dim_nums):
melted_df = pd.DataFrame()
for dim in dim_nums:
entropy_dim = f'Entropy_{dim}'
subdf = df[['Date', entropy_dim]]
subdf = subdf.rename(columns = {entropy_dim: 'Entropy'})
melted_df = pd.concat([melted_df, subdf], axis=0, ignore_index=True)
return melted_df
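# Hedged usage note (assumes the monthly entropy frames provide 'Date' and
# 'Entropy_<dim>' columns, as get_melted_df above expects), e.g.:
#   melted = get_melted_df(samp_entropy_df['France'], dim_nums=[1, 2])
#   melted.head()  # long-format frame with 'Date' and 'Entropy' columns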
cases_df = pd.read_csv('new_cases.csv')
cases_df = cases_df.rename(columns = {'Unnamed: 0': 'month'})
cases_df['month'] = pd.to_datetime(cases_df['month'])
cases_df = cases_df[(cases_df['month'] >= '03-01-2020') & (cases_df['month'] <= '06-30-2021')]
rf1 = pd.read_csv('predictions/RF_preds.csv')
rf2 = pd.read_csv('predictions/RF_infer_preds.csv')
import os
from root import *
import xgboost
from xgboost import XGBRegressor
import pickle
import pandas as pd
import datetime
from preprocessing.data_utils import *
from datetime import datetime, timedelta
pd.set_option('display.max_columns', 100)
train_all_x = pd.read_csv(root+"/data/interim/train_all_x.csv")
train_all_y = pd.read_csv(root+"/data/interim/train_all_y.csv")
future_input = pd.read_csv(root+"/data/interim/future_input.csv")
test_preds_plot = pd.read_csv(root+"/data/interim/test_preds_plot.csv")
'''THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE
DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,
WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
# Bitcoin Cash (BCH) qpz32c4lg7x7lnk9jg6qg7s4uavdce89myax5v5nuk
# Ether (ETH) - 0x843d3DEC2A4705BD4f45F674F641cE2D0022c9FB
# Litecoin (LTC) - Lfk5y4F7KZa9oRxpazETwjQnHszEPvqPvu
# Bitcoin (BTC) - 34L8qWiQyKr8k4TnHDacfjbaSqQASbBtTd
# contact :- <EMAIL>
# I am not affiliated, associated, authorized, endorsed by, or in any way officially connected with rapid7.com, or any of its subsidiaries or its affiliates.
# https://opendata.rapid7.com/sonar.http/
# https://github.com/rapid7/sonar/wiki/HTTP
# Could use some help to rent a bigger VPS to get the bigger datasets!
import json
import gzip
import base64
import csv
import glob
import re
import pandas as pd
OUTFILE = "outfile.csv"
bypass_err = [
"404 Not Found",
"403 Forbidden",
"403",
"404",
"ERROR: The requested URL could not be retrieved",
"400 Bad Request",
"Not Found",
"Moved Permanently",
"Unauthorized",
"401 Authorization Required",
"502 Bad Gateway",
"407 Proxy Authentication Required",
"Access Denied",
"Document moved",
"Bad Request",
"400 The plain HTTP request was sent to HTTPS port",
"401",
"503",
"ERROR: Forbidden",
"Error"
]
header_section_re = re.compile(
r'HTTP/(?P<version>\d+\.?\d*) (?P<status>\d+ .+?)((\r\n)|(\n))(?P<headers>.+?)((\r\n\r\n)|(\n\n))',
flags=re.DOTALL)
header_re = re.compile('(?P<name>.+?): (?P<value>.+)(\r?)')
title_re = re.compile(
r'(\<title\>)(?P<title>.+?)(\</title\>)',
flags=re.IGNORECASE)
server_re = re.compile('Server: (?P<server>.+?)\r?\n', flags=re.IGNORECASE)
content_type_re = re.compile(
'Content-Type: (?P<content_type>.+?)\r?\n',
flags=re.IGNORECASE)
content_encoding_re = re.compile(
'Content-Encoding: (?P<content_encoding>.+?)\r?\n',
flags=re.IGNORECASE)
last_modified_re = re.compile(
'Last-Modified: (?P<last_modified>.+?)\r?\n',
flags=re.IGNORECASE)
x_powered_by_re = re.compile(
'X-Powered-By: (?P<x_powered_by>.+?)\r?\n',
flags=re.IGNORECASE)
def write_to_csv(row):
with open(OUTFILE, 'a+') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(row)
csvfile.close()
def get_x_powered_by(html):
match_obj = x_powered_by_re.search(html)
if match_obj is not None:
match_obj = match_obj.group('x_powered_by').strip()
return match_obj
def get_content_type(html):
match_obj = content_type_re.search(html)
if match_obj is not None:
match_obj = match_obj.group('content_type').strip()
return match_obj
def get_content_encoding(html):
match_obj = content_encoding_re.search(html)
if match_obj is not None:
match_obj = match_obj.group('content_encoding').strip()
return match_obj
def get_server(html):
match_obj = server_re.search(html)
if match_obj is not None:
match_obj = match_obj.group('server').strip()
return match_obj
def get_title(html):
match_obj = title_re.search(html)
if match_obj is not None:
match_obj = match_obj.group('title').strip()
return match_obj
def get_last_modified(html):
match_obj = last_modified_re.search(html)
if match_obj is not None:
match_obj = match_obj.group('last_modified').strip()
return match_obj
def to_ascii(data):
if isinstance(data, str):
return data.encode("ascii", errors="ignore")
elif isinstance(data, bytes):
return data.decode("ascii", errors="ignore")
def to_utf(data):
if isinstance(data, str):
return data.encode("utf8", errors="ignore")
elif isinstance(data, bytes):
return data.decode("utf8", errors="ignore")
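# Illustrative sketch (the response below is fabricated, not Rapid7 data): shows what
# the regex helpers above extract from a decoded HTTP study record.
def _example_parse_record():
    sample = ("HTTP/1.1 200 OK\r\n"
              "Server: ExampleHTTPd/1.0\r\n"
              "Content-Type: text/html\r\n"
              "\r\n"
              "<html><head><title>Example Title</title></head></html>")
    print(get_server(sample))        # -> 'ExampleHTTPd/1.0'
    print(get_content_type(sample))  # -> 'text/html'
    print(get_title(sample))         # -> 'Example Title'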
server_vers = []
host_server =[]
ignore_hosts = ["Apache", "nginx", "Microsoft", "lighttpd", "xxxxxxxx-xxxxx", "xxxx"]
def parse_json_zips():
for INFILE in glob.glob("*.json.gz"):
#######################################################################
# this is needed to identify the different data sets, by port number.
#######################################################################
port_no = INFILE.replace('.json.gz', '')
port_no = str(port_no.split('_')[-1])
print("[+] debug, port:..." + str(port_no))
with gzip.open(INFILE) as f:
print('[+] Parsing JSON: {}'.format(INFILE))
for line in f:
tmp_lst = []
html_data = json.loads(line)
decoded_data = to_ascii(base64.b64decode(html_data["data"]))
server_header_name = get_server(decoded_data)
if server_header_name is not None:
if not any(x in server_header_name for x in ignore_hosts): #remove common servers
tmp_lst.append(str(html_data["host"]) + ":" + str(port_no))
#tmp_lst.append(get_title(decoded_data)) #not in use, Apr 2020
tmp_lst.append(server_header_name)
server_vers.append(server_header_name)
#tmp_lst.append(get_content_type(decoded_data)) #not in use, Apr 2020
#tmp_lst.append(get_content_encoding(decoded_data)) #not in use, Apr 2020
#tmp_lst.append(get_x_powered_by(decoded_data)) #not in use, Apr 2020
#tmp_lst.append(get_last_modified(decoded_data)) #not in use, Apr 2020
host_server.append(tmp_lst)
#write_to_csv(tmp_lst) #not in use, Apr 2020
def parse_json_headers():
headers = {}
for INFILE in glob.glob("*.json.gz"):
# this is needed to identify the different data sets, by port number.
port_no = INFILE.replace('.json.gz', '')
port_no = str(port_no.split('_')[-1])
print("[+] debug, port:..." + str(port_no))
with gzip.open(INFILE) as f:
print('[+] Parsing JSON: {}'.format(INFILE))
for line in f:
html_data = json.loads(line)
decoded_data = to_ascii(base64.b64decode(html_data["data"]))
match_obj = header_section_re.search(decoded_data)
if match_obj is None:
continue
for header in match_obj.group('headers').split('\n'):
header_match = header_re.search(header)
if header_match is None:
continue
if header_match.group('name').strip() not in headers:
headers[header_match.group('name').strip()] = 0
headers[header_match.group('name').strip()] += 1
return headers
parse_json_zips()
#debug ----> counting the occurrences of each server type to fine-tune. Helps create ignore_hosts above
print(pd.Series(server_vers))
import sys
import nltk
import time
import pickle
import re
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
def load_data(database_filepath: str):
""""
load messages table from sqlite database and splits the table into:
X: message str, training feature
Y: target variables
parameters
-----------
database_filename: str
Database name using the following format 'sqlite:///<database name>.db'
returns
-------
X: pd.DataFrame
message str, training feature
Y: pd.DataFrame
target variables
category_names: list
list containing categories to be used
"""
engine = create_engine('sqlite:///'+database_filepath)
df = pd.read_sql_table("messages", engine)
import pandas as pd
import numpy as np
import os
import requests
import time
directory = 'C:/Users/phil_/OneDrive/Documents/GitHub/rocket-league-stats/stat_files/'
playerli = []
# loop through player files and add to data frame
for filename in os.listdir(directory):
if filename.startswith("PLAYER_"):
#print(os.path.join(directory, filename))
df = pd.read_csv(directory+filename, sep=';', index_col=None, header=0)
'''
For r/brasil
Survey, analysis, and charts by:
u/Drunpy
• Code structure:
• Introduction
• Imports
• Data preparation
• Charts
'''
#IMPORTS
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
pesquisa = pd.read_csv("Pesquisa T.csv")
df = pd.DataFrame(pesquisa)
#DATA PREPARATION
#Rename the columns for readability
df.columns = ["data", "estado", "idade", "sexo", "renda", "estadocivil", "jatraiu", "jafoitraido", "comoseusamigosencaramisso", "comovcencaraisso", "nafamilia", "qmtraimais", "Sugestoesoucomentarios"]
#List of all states, used when splitting the data
estadosunico = ['MG', 'SC', 'RS', 'SP', 'RJ', 'PE', 'GO', 'DF', 'BA', 'CE', 'PR', 'AL', 'ES', 'RN', 'SE', 'PB', 'TO', 'PI', 'PA', 'RO', 'MT', 'MA', 'AM', 'RR', 'MS', 'AP']
#The code below filters the data
estado_x_pessoas = {}
estados_validos = {}
for i in estadosunico:
qnt = np.count_nonzero(df.estado == i)
estado_x_pessoas[i] = int(qnt)
#A state needs >= 19 participants to be considered valid
for k,v in estado_x_pessoas.items():
if v >= 19:
estados_validos[k] = v
#Number of people per state who answered 'Não' (i.e. have not cheated)
estado_x_jatraiu = {}
for i in estados_validos:
qntjatraiu = np.count_nonzero((df.estado == i) & (df.jatraiu == 'Não'))
estado_x_jatraiu[i] = int(qntjatraiu)
#Extract the age options
idadeops = []
for i in df.idade:
if i not in idadeops:
idadeops.append(i)
#Correlate ages with cheaters
idade_traiuqnt = {}
for i in idadeops:
qntraidores = np.count_nonzero((df.idade == i) & (df.jatraiu == 'Sim'))
idade_traiuqnt[i] = int(qntraidores)
#Total number of people per age
total_poridade = {}
for i in idadeops:
cont = np.count_nonzero(df.idade == i)
total_poridade[i] = cont
#Correlate participants and income
rendas_un = []
total_prenda ={}
for i in df.renda:
if i not in rendas_un:
rendas_un.append(i)
#^Workaround to relabel the xticks on the chart
total_prenda = {'<1000': 78, 'Entre 1000 e 2000': 71, '8000+': 46, 'Entre 3000 e 5000': 49, 'Entre 2000 e 3000': 46, 'Entre 5000 e 8000': 28}
#Who cheats more (according to the respondents)
traimais = {'Mulher': np.count_nonzero(df.qmtraimais == 'Mulher'),
            'Homem': np.count_nonzero(df.qmtraimais == 'Homem')}
#Correlation of sex and cheating (which sex cheated more)
sexo_traicao_sim = {}
for i in df.sexo:
ses = np.count_nonzero((df.sexo == i) & (df.jatraiu == 'Sim'))
if ses != 0:
sexo_traicao_sim[i] = ses
sexo_traicao_nao ={}
for i in df.sexo:
sen = np.count_nonzero((df.sexo == i) & (df.jatraiu == 'Não'))
if sen != 0:
sexo_traicao_nao[i] = sen
#What do you think about cheating?
opn_g = {}
for i in df.comovcencaraisso:
como = np.count_nonzero(df.comovcencaraisso == i)
opn_g[i] = como
'''
#Too much data for an Intel Pentium... SKIPPED
#Correlation sex / has been cheated on
sexo_jafoitraido = {}
for sexo in df.sexo:
for i in df.jafoitraido:
jafoi = np.count_nonzero((df.sexo == sexo) & (df.jafoitraido == i))
if jafoi != 0:
sexo_jafoitraido[sexo] = jafoi
'''
#CHARTS
def total_pessoas():
exp = pd.DataFrame.from_dict(estados_validos, orient='index')
exp.plot(kind='pie',
subplots=True,
shadow=True,
explode=[0,0 ,0.05 ,0 , 0],
labeldistance=1.1,
autopct='%.1f%%')
plt.title('Participações validadas', fontsize=15)
plt.suptitle('Não foram obtidos dados significativos\n de outros estados.', y=0.88, fontsize=6)
plt.show()
def mulhertraida():
dfmulhertraida = pd.DataFrame.from_dict(sexo_jafoitraido, orient='index')
def opn_geral():
dfopng = pd.DataFrame.from_dict(opn_g, orient='index')
dfopng.plot(kind='pie',
subplots=True,
shadow=True,
explode=[0.05, 0 ,0 ,0 , 0],
autopct = '%.1f%%')
plt.title('O que as pessoas acham sobre traição ?', size=20)
plt.suptitle('**Do total de respostas.', y=0.88, size=12)
plt.show()
def sexotraicaosim():
dfsts = pd.DataFrame.from_dict(sexo_traicao_sim, orient='index')
dfsts.plot(kind='pie',
subplots=True,
shadow=True,
explode= [0, 0.1, 0,0],
autopct='%.1f%%')
plt.title('Os que já trairam são: ', size=20)
plt.suptitle('**Ou opção sexual', y=0.88, size=9)
plt.show()
def traidores_estado():
dfestadosvalidos = pd.DataFrame.from_dict(estados_validos, orient='index')
dfestadosvalidos.columns = ['estado']
dfestado = pd.DataFrame.from_dict(estado_x_jatraiu, orient='index')
from itertools import groupby, zip_longest
from fractions import Fraction
from random import sample
import json
import pandas as pd
import numpy as np
import music21 as m21
from music21.meter import TimeSignatureException
m21.humdrum.spineParser.flavors['JRP'] = True
from collections import defaultdict
#song has no meter
class UnknownPGramType(Exception):
def __init__(self, arg):
self.arg = arg
def __str__(self):
return f"Unknown pgram type: {self.arg}."
#compute features:
def compute_completesmeasure_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
def compute_completesmeasure_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
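# Worked example with made-up values (not from any song in the corpus): a note that
# starts on beat 3 of its phrase with a 1-beat IOI ends exactly on beat 4, so in a
# 4-beat measure endpos % beatspermeasure == 0 holds.
def _example_completes_measure():
    endpos = Fraction('3') - Fraction('0') + Fraction('1')
    return endpos % 4 == 0   # -> True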
#extract IOI in units of beat
#IOI_beatfraction[i] is IOI from start of ith note till start of (i+1)th note
#for last note: beatfraction is taken
#Also to be interpreted as duration of note + duration of following rests (except for rests at end of melody)
#
#extract beats per measure
def extractFeatures(seq_iter, vocalfeatures=True):
count = 0
for seq in seq_iter:
count += 1
if count % 100 == 0:
print(count, end=' ')
pairs = zip(seq['features']['beatinsong'],seq['features']['beatinsong'][1:]) #this possibly includes rests
IOI_beatfraction = [Fraction(o[1])-Fraction(o[0]) for o in pairs]
IOI_beatfraction = [str(bf) for bf in IOI_beatfraction] + [seq['features']['beatfraction'][-1]]
seq['features']['IOI_beatfraction'] = IOI_beatfraction
beatspermeasure = [m21.meter.TimeSignature(ts).beatCount for ts in seq['features']['timesignature']]
seq['features']['beatspermeasure'] = beatspermeasure
phrasepos = seq['features']['phrasepos']
phrasestart_ix=[0]*len(phrasepos)
for ix in range(1,len(phrasestart_ix)):
if phrasepos[ix] < phrasepos[ix-1]:
phrasestart_ix[ix] = ix
else:
phrasestart_ix[ix] = phrasestart_ix[ix-1]
seq['features']['phrasestart_ix'] = phrasestart_ix
endOfPhrase = [x[1]<x[0] for x in zip(phrasepos, phrasepos[1:])] + [True]
seq['features']['endOfPhrase'] = endOfPhrase
cm_p = [compute_completesmeasure_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cb_p = [compute_completesbeat_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cm_s = [compute_completesmeasure_song(seq, ix) for ix in range(len(phrasepos))]
cb_s = [compute_completesbeat_song(seq, ix) for ix in range(len(phrasepos))]
seq['features']['completesmeasure_phrase'] = cm_p
seq['features']['completesbeat_phrase'] = cb_p
seq['features']['completesmeasure_song'] = cm_s
seq['features']['completesbeat_song'] = cb_s
if vocalfeatures:
#move lyric features to end of melisma:
#rhymes, rhymescontentwords, wordstress, noncontentword, wordend
#and compute rhyme_noteoffset and rhyme_beatoffset
if 'melismastate' in seq['features'].keys(): #vocal?
lyrics = seq['features']['lyrics']
phoneme = seq['features']['phoneme']
melismastate = seq['features']['melismastate']
rhymes = seq['features']['rhymes']
rhymescontentwords = seq['features']['rhymescontentwords']
wordend = seq['features']['wordend']
noncontentword = seq['features']['noncontentword']
wordstress = seq['features']['wordstress']
rhymes_endmelisma, rhymescontentwords_endmelisma = [], []
wordend_endmelisma, noncontentword_endmelisma, wordstress_endmelisma = [], [], []
lyrics_endmelisma, phoneme_endmelisma = [], []
from_ix = 0
inmelisma = False
for ix in range(len(phrasepos)):
if melismastate[ix] == 'start':
from_ix = ix
inmelisma = True
if melismastate[ix] == 'end':
if not inmelisma:
from_ix = ix
inmelisma = False
rhymes_endmelisma.append(rhymes[from_ix])
rhymescontentwords_endmelisma.append(rhymescontentwords[from_ix])
wordend_endmelisma.append(wordend[from_ix])
noncontentword_endmelisma.append(noncontentword[from_ix])
wordstress_endmelisma.append(wordstress[from_ix])
lyrics_endmelisma.append(lyrics[from_ix])
phoneme_endmelisma.append(phoneme[from_ix])
else:
rhymes_endmelisma.append(False)
rhymescontentwords_endmelisma.append(False)
wordend_endmelisma.append(False)
noncontentword_endmelisma.append(False)
wordstress_endmelisma.append(False)
lyrics_endmelisma.append(None)
phoneme_endmelisma.append(None)
seq['features']['rhymes_endmelisma'] = rhymes_endmelisma
seq['features']['rhymescontentwords_endmelisma'] = rhymescontentwords_endmelisma
seq['features']['wordend_endmelisma'] = wordend_endmelisma
seq['features']['noncontentword_endmelisma'] = noncontentword_endmelisma
seq['features']['wordstress_endmelisma'] = wordstress_endmelisma
seq['features']['lyrics_endmelisma'] = lyrics_endmelisma
seq['features']['phoneme_endmelisma'] = phoneme_endmelisma
#compute rhyme_noteoffset and rhyme_beatoffset
rhyme_noteoffset = [0]
rhyme_beatoffset = [0.0]
previous = 0
previousbeat = float(Fraction(seq['features']['beatinsong'][0]))
for ix in range(1,len(rhymescontentwords_endmelisma)):
if rhymescontentwords_endmelisma[ix-1]: #previous rhymes
previous = ix
previousbeat = float(Fraction(seq['features']['beatinsong'][ix]))
rhyme_noteoffset.append(ix - previous)
rhyme_beatoffset.append(float(Fraction(seq['features']['beatinsong'][ix])) - previousbeat)
seq['features']['rhymescontentwords_noteoffset'] = rhyme_noteoffset
seq['features']['rhymescontentwords_beatoffset'] = rhyme_beatoffset
else:
#vocal features requested, but not present.
#skip melody
continue
#Or do this?
if False:
length = len(phrasepos)
seq['features']['rhymes_endmelisma'] = [None] * length
seq['features']['rhymescontentwords_endmelisma'] = [None] * length
seq['features']['wordend_endmelisma'] = [None] * length
seq['features']['noncontentword_endmelisma'] = [None] * length
seq['features']['wordstress_endmelisma'] = [None] * length
seq['features']['lyrics_endmelisma'] = [None] * length
seq['features']['phoneme_endmelisma'] = [None] * length
yield seq
class NoFeaturesError(Exception):
def __init__(self, arg):
self.args = arg
class NoTrigramsError(Exception):
def __init__(self, arg):
self.args = arg
def __str__(self):
return repr(self.args)
#endix is index of last note + 1
def computeSumFractions(fractions, startix, endix):
res = 0.0
for fr in fractions[startix:endix]:
res = res + float(Fraction(fr))
return res
#make groups of indices with the same successive pitch, but (optionally) not crossing phrase boundaries <- 20200331 crossing phrase boundaries should be allowed (contourfourth)
#returns tuples (ix of first note in group, ix of last note in group + 1)
#crossPhraseBreak=False splits on phrase break. N.B. Is Using GroundTruth!
def breakpitchlist(midipitch, phrase_ix, crossPhraseBreak=False):
res = []
if crossPhraseBreak:
for _, g in groupby( enumerate(midipitch), key=lambda x:x[1]):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
else: #N.B. This uses the ground truth
for _, g in groupby( enumerate(zip(midipitch,phrase_ix)), key=lambda x:(x[1][0],x[1][1])):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
return res
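# Worked example (pitches and phrase indices are made up, for illustration only):
def _example_breakpitchlist():
    midipitch = [60, 60, 62, 62, 62, 64]
    phrase_ix = [0, 0, 0, 1, 1, 1]
    # the repeated 62s are split at the phrase boundary between index 2 and 3
    return breakpitchlist(midipitch, phrase_ix)   # -> [(0, 2), (2, 3), (3, 5), (5, 6)]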
#True if no phrase end at first or second item (span) in the trigram
#trigram looks like ((8, 10), (10, 11), (11, 12))
def noPhraseBreak(tr, endOfPhrase):
return not ( ( True in endOfPhrase[tr[0][0]:tr[0][1]] ) or \
( True in endOfPhrase[tr[1][0]:tr[1][1]] ) )
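# Worked example (spans made up): for the trigram ((8, 10), (10, 11), (11, 12)),
# noPhraseBreak returns False as soon as any of endOfPhrase[8:10] or endOfPhrase[10:11]
# is True, i.e. when a phrase ends inside the first or second span.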
#pgram_type : "pitch", "note"
def extractPgramsFromCorpus(corpus, pgram_type="pitch", startat=0, endat=None):
pgrams = {}
arfftype = {}
for ix, seq in enumerate(corpus):
if endat is not None:
if ix >= endat:
continue
if ix < startat:
continue
if not ix%100:
print(ix, end=' ')
songid = seq['id']
try:
pgrams[songid], arfftype_new = extractPgramsFromMelody(seq, pgram_type=pgram_type)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervalsize', typeconv=lambda x: abs(int(x)))
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervaldir', typeconv=np.sign)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'diatonicpitch', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'VosHarmony', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'beatstrength', typeconv=float)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'IOIbeatfraction', typeconv=float)
if 'melismastate' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'wordstress', typeconv=int)
if 'informationcontent' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'informationcontent', typeconv=float)
except NoFeaturesError:
print(songid, ": No features extracted.")
except NoTrigramsError:
print(songid, ": No trigrams extracted")
#if ix > startat:
# if arfftype.keys() != arfftype_new.keys():
# print("Warning: Melodies have different feature sets.")
# print(list(zip_longest(arfftype.keys(), arfftype_new.keys())))
#Keep largest set of features possible. N.B. no guarantee that all features in arfftype are in each sequence.
arfftype.update(arfftype_new)
#concat melodies
pgrams = pd.concat([v for v in pgrams.values()])
return pgrams, arfftype
def extractPgramsFromMelody(seq, pgram_type, skipPhraseCrossing=False):
# some aliases
scaledegree = seq['features']['scaledegree']
endOfPhrase = seq['features']['endOfPhrase']
midipitch = seq['features']['midipitch']
phrase_ix = seq['features']['phrase_ix']
if pgram_type == "pitch":
event_spans = breakpitchlist(midipitch, phrase_ix) #allow pitches to cross phrase break
elif pgram_type == "note":
event_spans = list(zip(range(len(scaledegree)),range(1,len(scaledegree)+1)))
else:
raise UnknownPGramType(pgram_type)
# make trigram of spans
event_spans = event_spans + [(None, None), (None, None)]
pgram_span_ixs = list(zip(event_spans,event_spans[1:],event_spans[2:],event_spans[3:],event_spans[4:]))
# If skipPhraseCrossing, prune trigrams crossing phrase boundaries. WHY?
#Why actually? e.g. kindr154 phrases of 2 pitches
if skipPhraseCrossing:
pgram_span_ixs = [ixs for ixs in pgram_span_ixs if noPhraseBreak(ixs,endOfPhrase)]
if len(pgram_span_ixs) == 0:
raise NoTrigramsError(seq['id'])
# create dataframe with pgram names as index
pgram_ids = [seq["id"]+'_'+str(ixs[0][0]).zfill(3) for ixs in pgram_span_ixs]
pgrams = pd.DataFrame(index=pgram_ids)
pgrams['ix0_0'] = pd.array([ix[0][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix0_1'] = pd.array([ix[0][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_0'] = pd.array([ix[1][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_1'] = pd.array([ix[1][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_0'] = pd.array([ix[2][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_1'] = pd.array([ix[2][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_0'] = pd.array([ix[3][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_1'] = pd.array([ix[3][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_0'] = pd.array([ix[4][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_1'] = pd.array([ix[4][1] for ix in pgram_span_ixs], dtype="Int16")
"""
Download, transform and simulate various datasets.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: MIT
from os.path import join
from re import sub
from collections import Counter
from itertools import product
from urllib.parse import urljoin
from string import ascii_lowercase
from zipfile import ZipFile
from io import BytesIO, StringIO
from sqlite3 import connect
from scipy.io import loadmat
import io
from rich.progress import track
import requests
import numpy as np
import pandas as pd
from sklearn.utils import check_X_y
from imblearn.datasets import make_imbalance
from research.utils import img_array_to_pandas
UCI_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/"
KEEL_URL = "http://sci2s.ugr.es/keel/keel-dataset/datasets/imbalanced/"
GIC_URL = "http://www.ehu.eus/ccwintco/uploads/"
OPENML_URL = "https://www.openml.org/data/get_csv/"
FETCH_URLS = {
"breast_tissue": urljoin(UCI_URL, "00192/BreastTissue.xls"),
"ecoli": urljoin(UCI_URL, "ecoli/ecoli.data"),
"eucalyptus": urljoin(OPENML_URL, "3625/dataset_194_eucalyptus.arff"),
"glass": urljoin(UCI_URL, "glass/glass.data"),
"haberman": urljoin(UCI_URL, "haberman/haberman.data"),
"heart": urljoin(UCI_URL, "statlog/heart/heart.dat"),
"iris": urljoin(UCI_URL, "iris/bezdekIris.data"),
"libras": urljoin(UCI_URL, "libras/movement_libras.data"),
"liver": urljoin(UCI_URL, "liver-disorders/bupa.data"),
"pima": "https://gist.githubusercontent.com/ktisha/c21e73a1bd1700294ef790c56c8aec1f"
"/raw/819b69b5736821ccee93d05b51de0510bea00294/pima-indians-diabetes.csv",
"vehicle": urljoin(UCI_URL, "statlog/vehicle/"),
"wine": urljoin(UCI_URL, "wine/wine.data"),
"new_thyroid_1": urljoin(
urljoin(KEEL_URL, "imb_IRlowerThan9/"), "new-thyroid1.zip"
),
"new_thyroid_2": urljoin(
urljoin(KEEL_URL, "imb_IRlowerThan9/"), "new-thyroid2.zip"
),
"cleveland": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p2/"), "cleveland-0_vs_4.zip"
),
"led": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p2/"), "led7digit-0-2-4-5-6-7-8-9_vs_1.zip"
),
"page_blocks_1_3": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p1/"), "page-blocks-1-3_vs_4.zip"
),
"vowel": urljoin(urljoin(KEEL_URL, "imb_IRhigherThan9p1/"), "vowel0.zip"),
"yeast_1": urljoin(urljoin(KEEL_URL, "imb_IRlowerThan9/"), "yeast1.zip"),
"banknote_authentication": urljoin(
UCI_URL, "00267/data_banknote_authentication.txt"
),
"arcene": urljoin(UCI_URL, "arcene/"),
"audit": urljoin(UCI_URL, "00475/audit_data.zip"),
"spambase": urljoin(UCI_URL, "spambase/spambase.data"),
"parkinsons": urljoin(UCI_URL, "parkinsons/parkinsons.data"),
"ionosphere": urljoin(UCI_URL, "ionosphere/ionosphere.data"),
"breast_cancer": urljoin(UCI_URL, "breast-cancer-wisconsin/wdbc.data"),
"adult": urljoin(UCI_URL, "adult/adult.data"),
"abalone": urljoin(UCI_URL, "abalone/abalone.data"),
"acute": urljoin(UCI_URL, "acute/diagnosis.data"),
"annealing": urljoin(UCI_URL, "annealing/anneal.data"),
"census": urljoin(UCI_URL, "census-income-mld/census-income.data.gz"),
"contraceptive": urljoin(UCI_URL, "cmc/cmc.data"),
"covertype": urljoin(UCI_URL, "covtype/covtype.data.gz"),
"credit_approval": urljoin(UCI_URL, "credit-screening/crx.data"),
"dermatology": urljoin(UCI_URL, "dermatology/dermatology.data"),
"echocardiogram": urljoin(UCI_URL, "echocardiogram/echocardiogram.data"),
"flags": urljoin(UCI_URL, "flags/flag.data"),
"heart_disease": [
urljoin(UCI_URL, "heart-disease/processed.cleveland.data"),
urljoin(UCI_URL, "heart-disease/processed.hungarian.data"),
urljoin(UCI_URL, "heart-disease/processed.switzerland.data"),
urljoin(UCI_URL, "heart-disease/processed.va.data"),
],
"hepatitis": urljoin(UCI_URL, "hepatitis/hepatitis.data"),
"german_credit": urljoin(UCI_URL, "statlog/german/german.data"),
"thyroid": urljoin(UCI_URL, "thyroid-disease/thyroid0387.data"),
"first_order_theorem": urljoin(OPENML_URL, "1587932/phpPbCMyg"),
"gas_drift": urljoin(OPENML_URL, "1588715/phpbL6t4U"),
"autouniv_au7": urljoin(OPENML_URL, "1593748/phpmRPvKy"),
"autouniv_au4": urljoin(OPENML_URL, "1593744/phpiubDlf"),
"mice_protein": urljoin(OPENML_URL, "17928620/phpchCuL5"),
"steel_plates": urljoin(OPENML_URL, "18151921/php5s7Ep8"),
"cardiotocography": urljoin(OPENML_URL, "1593756/phpW0AXSQ"),
"waveform": urljoin(OPENML_URL, "60/dataset_60_waveform-5000.arff"),
"volkert": urljoin(OPENML_URL, "19335689/file1c556e3db171.arff"),
"asp_potassco": urljoin(OPENML_URL, "21377447/file18547f421393.arff"),
"wine_quality": urljoin(OPENML_URL, "4965268/wine-quality-red.arff"),
"mfeat_zernike": urljoin(OPENML_URL, "22/dataset_22_mfeat-zernike.arff"),
"gesture_segmentation": urljoin(OPENML_URL, "1798765/phpYLeydd"),
"texture": urljoin(OPENML_URL, "4535764/phpBDgUyY"),
"usps": urljoin(OPENML_URL, "19329737/usps.arff"),
"japanese_vowels": urljoin(OPENML_URL, "52415/JapaneseVowels.arff"),
"pendigits": urljoin(OPENML_URL, "32/dataset_32_pendigits.arff"),
"image_segmentation": urljoin(OPENML_URL, "18151937/phpyM5ND4"),
"baseball": urljoin(OPENML_URL, "3622/dataset_189_baseball.arff"),
"indian_pines": [
urljoin(GIC_URL, "2/22/Indian_pines.mat"),
urljoin(GIC_URL, "c/c4/Indian_pines_gt.mat"),
],
"salinas": [
urljoin(GIC_URL, "f/f1/Salinas.mat"),
urljoin(GIC_URL, "f/fa/Salinas_gt.mat"),
],
"salinas_a": [
urljoin(GIC_URL, "d/df/SalinasA.mat"),
urljoin(GIC_URL, "a/aa/SalinasA_gt.mat"),
],
"pavia_centre": [
urljoin(GIC_URL, "e/e3/Pavia.mat"),
urljoin(GIC_URL, "5/53/Pavia_gt.mat"),
],
"pavia_university": [
urljoin(GIC_URL, "e/ee/PaviaU.mat"),
urljoin(GIC_URL, "5/50/PaviaU_gt.mat"),
],
"kennedy_space_center": [
urljoin(GIC_URL, "2/26/KSC.mat"),
urljoin(GIC_URL, "a/a6/KSC_gt.mat"),
],
"botswana": [
urljoin(GIC_URL, "7/72/Botswana.mat"),
urljoin(GIC_URL, "5/58/Botswana_gt.mat"),
],
}
RANDOM_STATE = 0
class Datasets:
"""Base class to download and save datasets."""
def __init__(self, names="all"):
self.names = names
@staticmethod
def _modify_columns(data):
"""Rename and reorder columns of dataframe."""
X, y = data.drop(columns="target"), data.target
X.columns = range(len(X.columns))
return pd.concat([X, y], axis=1)
def download(self):
"""Download the datasets."""
if self.names == "all":
func_names = [func_name for func_name in dir(self) if "fetch_" in func_name]
else:
func_names = [
f"fetch_{name}".lower().replace(" ", "_") for name in self.names
]
self.content_ = []
for func_name in track(func_names, description="Datasets"):
name = func_name.replace("fetch_", "").upper().replace("_", " ")
fetch_data = getattr(self, func_name)
data = self._modify_columns(fetch_data())
self.content_.append((name, data))
return self
def save(self, path, db_name):
"""Save datasets."""
with connect(join(path, f"{db_name}.db")) as connection:
for name, data in self.content_:
data.to_sql(name, connection, index=False, if_exists="replace")
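# Hedged usage sketch (path and dataset names are illustrative, not from the original):
def _example_download_and_save():
    """Download two of the binary datasets defined below and store them in SQLite."""
    datasets = ImbalancedBinaryDatasets(names=['glass', 'haberman'])
    datasets.download().save(path='.', db_name='imbalanced_binary')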
class ImbalancedBinaryDatasets(Datasets):
"""Class to download, transform and save binary class imbalanced
datasets."""
MULTIPLICATION_FACTORS = [2, 3]
@staticmethod
def _calculate_ratio(multiplication_factor, y):
"""Calculate ratio based on IRs multiplication factor."""
ratio = Counter(y).copy()
ratio[1] = int(ratio[1] / multiplication_factor)
return ratio
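# Illustrative example (values made up): with Counter(y) == {0: 100, 1: 30} and
# multiplication_factor == 2, the returned sampling strategy is {0: 100, 1: 15},
# i.e. only the minority-class count is reduced.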
def _make_imbalance(self, data, multiplication_factor):
"""Undersample the minority class."""
X_columns = [col for col in data.columns if col != "target"]
X, y = check_X_y(data.loc[:, X_columns], data.target)
if multiplication_factor > 1.0:
sampling_strategy = self._calculate_ratio(multiplication_factor, y)
X, y = make_imbalance(
X, y, sampling_strategy=sampling_strategy, random_state=RANDOM_STATE
)
data = pd.DataFrame(np.column_stack((X, y)))
data.iloc[:, -1] = data.iloc[:, -1].astype(int)
return data
def download(self):
"""Download the datasets and append undersampled versions of them."""
super(ImbalancedBinaryDatasets, self).download()
undersampled_datasets = []
for (name, data), factor in list(
product(self.content_, self.MULTIPLICATION_FACTORS)
):
ratio = self._calculate_ratio(factor, data.target)
if ratio[1] >= 15:
data = self._make_imbalance(data, factor)
undersampled_datasets.append((f"{name} ({factor})", data))
self.content_ += undersampled_datasets
return self
def fetch_breast_tissue(self):
"""Download and transform the Breast Tissue Data Set.
The minority class is identified as the `car` and `fad`
labels and the majority class as the rest of the labels.
http://archive.ics.uci.edu/ml/datasets/breast+tissue
"""
data = pd.read_excel(FETCH_URLS["breast_tissue"], sheet_name="Data")
data = data.drop(columns="Case #").rename(columns={"Class": "target"})
data["target"] = data["target"].isin(["car", "fad"]).astype(int)
return data
def fetch_ecoli(self):
"""Download and transform the Ecoli Data Set.
The minority class is identified as the `pp` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/ecoli
"""
data = pd.read_csv(FETCH_URLS["ecoli"], header=None, delim_whitespace=True)
data = data.drop(columns=0).rename(columns={8: "target"})
data["target"] = data["target"].isin(["pp"]).astype(int)
return data
def fetch_eucalyptus(self):
"""Download and transform the Eucalyptus Data Set.
The minority class is identified as the `best` label
and the majority class as the rest of the labels.
https://www.openml.org/d/188
"""
data = pd.read_csv(FETCH_URLS["eucalyptus"])
data = data.iloc[:, -9:].rename(columns={"Utility": "target"})
data = data[data != "?"].dropna()
data["target"] = data["target"].isin(["best"]).astype(int)
return data
def fetch_glass(self):
"""Download and transform the Glass Identification Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/glass+identification
"""
data = pd.read_csv(FETCH_URLS["glass"], header=None)
data = data.drop(columns=0).rename(columns={10: "target"})
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_haberman(self):
"""Download and transform the Haberman's Survival Data Set.
The minority class is identified as the `1` label
and the majority class as the `0` label.
https://archive.ics.uci.edu/ml/datasets/Haberman's+Survival
"""
data = pd.read_csv(FETCH_URLS["haberman"], header=None)
data.rename(columns={3: "target"}, inplace=True)
data["target"] = data["target"].isin([2]).astype(int)
return data
def fetch_heart(self):
"""Download and transform the Heart Data Set.
The minority class is identified as the `2` label
and the majority class as the `1` label.
http://archive.ics.uci.edu/ml/datasets/statlog+(heart)
"""
data = | pd.read_csv(FETCH_URLS["heart"], header=None, delim_whitespace=True) | pandas.read_csv |
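# A minimal, self-contained sketch of how the undersampling ratio used by
# ImbalancedBinaryDatasets._calculate_ratio above behaves (the labels below are
# made up for illustration; make_imbalance then keeps exactly these class counts):
from collections import Counter

y = [0] * 90 + [1] * 30            # imbalance ratio 3:1
ratio = Counter(y)
ratio[1] = int(ratio[1] / 2)       # multiplication factor 2
print(ratio)                       # Counter({0: 90, 1: 15}) -> imbalance ratio 6:1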
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from msticpy.analysis.anomalous_sequence import sessionize
class TestSessionize(unittest.TestCase):
def setUp(self):
self.df1 = pd.DataFrame({"UserId": [], "time": [], "operation": []})
self.df1_with_ses_col = pd.DataFrame(
{"UserId": [], "time": [], "operation": [], "session_ind": []}
)
self.df1_sessionized = pd.DataFrame(
{
"UserId": [],
"time_min": [],
"time_max": [],
"operation_list": [],
"duration": [],
"number_events": [],
}
)
self.df2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 3, 1, 2, 2],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-06 11:06:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
],
"operation": ["A", "B", "C", "A", "A", "B", "C"],
}
)
self.df2_with_ses_col_1 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation": ["A", "B", "A", "C", "B", "C", "A"],
"session_ind": [0, 0, 1, 2, 3, 4, 5],
}
)
self.df2_sessionized_1 = pd.DataFrame(
{
"UserId": [1, 1, 2, 2, 2, 3],
"time_min": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"time_max": [
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation_list": [["A", "B"], ["A"], ["C"], ["B"], ["C"], ["A"]],
"duration": [
pd.to_timedelta(1, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
pd.to_timedelta(0, "min"),
],
"number_events": [2, 1, 1, 1, 1, 1],
}
)
self.df2_with_ses_col_2 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation": ["A", "B", "A", "C", "B", "C", "A"],
"session_ind": [0, 0, 1, 2, 3, 3, 4],
}
)
self.df2_sessionized_2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 2, 3],
"time_min": [
pd.to_datetime("2020-01-03 00:00:00"),
| pd.to_datetime("2020-01-03 01:00:00") | pandas.to_datetime |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import time
def send_email(con="你好!"):
_user = "<EMAIL>"
_pwd = "<PASSWORD>"
_to = "<EMAIL>"
    # Use MIMEText to build an SMTP-compliant header and body
msg = MIMEMultipart('related')
subject = "以下是今天的报表,请查收!--" + str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
# # msgRoot['From'] = Header("菜鸟教程", 'utf-8')
msg["Subject"] = subject
msg["From"] = _user
msg["To"] = _to
    # Alternative content (multipart/alternative)
msgAlternative = MIMEMultipart('alternative')
msg.attach(msgAlternative)
mail_msg1 = '<p><img src="cid:image1"></p>'
mail_msg2 = '<p><img src="cid:image2"></p>'
mail_msg3 = '<p><img src="cid:image3"></p>'
mail_msg4 = '<p><img src="cid:image4"></p>'
mail_msg = mail_msg1 + mail_msg2 + mail_msg3 + mail_msg4
msgAlternative.attach(MIMEText(mail_msg, 'html', 'utf-8'))
    # The images are expected in the current directory
with open("pic1.png", "rb") as f:
msgImage1 = MIMEImage(f.read())
with open("pic2.png", "rb") as f:
msgImage2 = MIMEImage(f.read())
with open("pic3.png", "rb") as f:
msgImage3 = MIMEImage(f.read())
with open("pic4.png", "rb") as f:
msgImage4 = MIMEImage(f.read())
    # Define the image IDs referenced from the HTML body
msgImage1.add_header('Content-ID', '<image1>')
msgImage2.add_header('Content-ID', '<image2>')
msgImage3.add_header('Content-ID', '<image3>')
msgImage4.add_header('Content-ID', '<image4>')
msg.attach(msgImage1)
msg.attach(msgImage2)
msg.attach(msgImage3)
msg.attach(msgImage4)
    s = smtplib.SMTP("smtp.sina.com", 25, timeout=30)  # connect to the SMTP server; the default port is 25
    s.login(_user, _pwd)  # log in to the server
    s.sendmail(_user, _to, msg.as_string())  # send the email
s.close()
def get_pic():
df = pd.read_csv("./count.csv", index_col=None, names=["total"],
parse_dates=True, dtype={"total": np.int64}, encoding="utf-8")
end_day = str(time.strftime("%Y-%m-%d 00:00:00", time.localtime()))
df = df.loc["2018-01-03 00:00:00":end_day]
# plt.figure(1)
    # Change in the total count, sampled once per minute
    # plot
df.plot()
plt.title('Total counts change each minute', color='#123456')
plt.savefig("pic1.png")
# plt.show()
    # Downsample: show the change in the total per hour
    a = df.resample('1H').mean()
    # plot
    a.plot()  # marker="v" would set the marker shape
plt.title('Total counts change each hour', color='#123456')
plt.savefig("pic2.png")
# plt.show()
    # Crawl speed shown per hour
increment = df['total'].diff()
dif = pd.DataFrame({"crawl speed": increment}, index=df.index)
b = dif.resample('1H').sum()
    # plot
b.plot()
plt.title('Crawl speed each day', color='#123456')
plt.savefig("pic3.png")
# plt.show()
    # Amount of data collected per day
c = dif.resample('1D').sum()
    # plot
c.plot(kind="bar", figsize=(10, 6))
plt.title('Crawl Data Quantity Of HLJ Spider Each Day', color='#123456')
plt.xlabel("DateTime")
plt.ylabel("Data Total(row)")
plt.ylim(0, 130000)
plt.grid(True)
    plt.text(2, 100000, r'$\mu=100,\ \sigma=15$')
plt.savefig("pic4.png")
# plt.axis([40, 160, 0, 0.03])
# plt.show()
def data_li(s):
st = ""
for i in str(s).split("\n")[1:]:
a = "<li>" + i + "</li>"
st += a
return st
def deal_data():
df = pd.read_csv("./count.csv", index_col=None, names=["total"],
parse_dates=True, dtype={"total": np.int64}, encoding="utf-8")
    # Time string for today at midnight
end_day = str(time.strftime("%Y-%m-%d 00:00:00", time.localtime()))
    # Get the total count
total = df.iat[-1, -1]
s1 = "<li>以下是近期的采集数据报告</li>"
s2 = "<li>截自目前采集总数为:%d 条</li>" % total
    # Crawl speed over the last 10 minutes
increment = df['total'].diff()
recent_status = pd.DataFrame({"Crawl Speed": increment}, index=df.index)
s3 = "<li>近10分钟采集速度 (条)</li>"
s4 = str(recent_status[-10:])
s4 = data_li(s4)
    # Daily collection totals for the days before today
    df_standard = df.loc["2018-01-03 00:00:00":end_day]
    # Build a per-minute speed DataFrame
increment = df_standard['total'].diff()
df_speed_of_min = pd.DataFrame({"Crawl Speed": increment}, index=df_standard.index)
    # Build DataFrames at different time resolutions
each_day_speed_sum = df_speed_of_min.resample('1D').sum()
each_day_speed_sum["Crawl Speed"] = each_day_speed_sum["Crawl Speed"].astype(np.int64)
each_day_speed_sum = each_day_speed_sum.sort_index(axis=0)
each_day_speed_mean = df_speed_of_min.resample('1D').mean()
each_day_speed_mean["Crawl Speed"] = each_day_speed_mean["Crawl Speed"].astype(np.float32)
    # Show two decimal places when printing to the console
| pd.set_option('precision', 2) | pandas.set_option |
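# A small, self-contained sketch of the diff()/resample() pattern used in the
# report script above (the counter values are synthetic, for illustration only):
import numpy as np
import pandas as pd

idx = pd.date_range("2018-01-03", periods=180, freq="T")        # one reading per minute
total = pd.Series(np.arange(180) * 50, index=idx, name="total")
speed = total.diff()                    # rows collected per minute
per_hour = speed.resample("1H").sum()   # rows collected per hour
print(per_hour)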
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from datetime import datetime
from dateutil.relativedelta import relativedelta
import bisect
import copy
import itertools
import json
import logging
import math
import time
import warnings
from abc import abstractmethod, ABCMeta
from collections.abc import MutableMapping
from numcodecs import Blosc
from typing import Iterator, Any, List, Dict, Tuple, Callable, Iterable, KeysView, Mapping, Union
import numpy as np
import pandas as pd
from .cciodp import CciOdp
from .constants import COMMON_COORD_VAR_NAMES
_STATIC_ARRAY_COMPRESSOR_PARAMS = dict(cname='zstd', clevel=1, shuffle=Blosc.SHUFFLE, blocksize=0)
_STATIC_ARRAY_COMPRESSOR_CONFIG = dict(id='blosc', **_STATIC_ARRAY_COMPRESSOR_PARAMS)
_STATIC_ARRAY_COMPRESSOR = Blosc(**_STATIC_ARRAY_COMPRESSOR_PARAMS)
_LOG = logging.getLogger()
_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S"
def _dict_to_bytes(d: Dict):
return _str_to_bytes(json.dumps(d, indent=2))
def _str_to_bytes(s: str):
return bytes(s, encoding='utf-8')
# todo move this to xcube
class RemoteChunkStore(MutableMapping, metaclass=ABCMeta):
"""
A remote Zarr Store.
:param data_id: The identifier of the data resource
:param cube_params: A mapping containing additional parameters to define
the data set.
:param observer: An optional callback function called when remote requests
        are made: observer(**kwargs).
:param trace_store_calls: Whether store calls shall be printed
(for debugging).
"""
def __init__(self,
data_id: str,
cube_params: Mapping[str, Any] = None,
observer: Callable = None,
trace_store_calls=False):
if not cube_params:
cube_params = {}
self._variable_names = cube_params.get('variable_names',
self.get_all_variable_names())
self._attrs = {}
self._observers = [observer] if observer is not None else []
self._trace_store_calls = trace_store_calls
self._dataset_name = data_id
self._time_ranges = self.get_time_ranges(data_id, cube_params)
logging.debug('Determined time ranges')
if not self._time_ranges:
raise ValueError('Could not determine any valid time stamps')
t_array = [s.to_pydatetime()
+ 0.5 * (e.to_pydatetime() - s.to_pydatetime())
for s, e in self._time_ranges]
t_array = np.array(t_array).astype('datetime64[s]').astype(np.int64)
t_bnds_array = \
np.array(self._time_ranges).astype('datetime64[s]').astype(np.int64)
time_coverage_start = self._time_ranges[0][0]
time_coverage_end = self._time_ranges[-1][1]
cube_params['time_range'] = (self._extract_time_range_as_strings(
cube_params.get('time_range',
self.get_default_time_range(data_id))))
self._vfs = {}
self._var_name_to_ranges = {}
self._ranges_to_indexes = {}
self._ranges_to_var_names = {}
bbox = cube_params.get('bbox', None)
lon_size = -1
lat_size = -1
self._dimension_chunk_offsets = {}
self._dimensions = self.get_dimensions()
coords_data = self.get_coords_data(data_id)
logging.debug('Determined coordinates')
coords_data['time'] = {}
coords_data['time']['size'] = len(t_array)
coords_data['time']['data'] = t_array
if 'time_bounds' in coords_data:
coords_data.pop('time_bounds')
coords_data['time_bnds'] = {}
        coords_data['time_bnds']['size'] = len(t_bnds_array)
        coords_data['time_bnds']['data'] = t_bnds_array
sorted_coords_names = list(coords_data.keys())
sorted_coords_names.sort()
lat_min_offset = -1
lat_max_offset = -1
lon_min_offset = -1
lon_max_offset = -1
for coord_name in sorted_coords_names:
if coord_name == 'time' or coord_name == 'time_bnds':
continue
coord_attrs = self.get_attrs(coord_name)
coord_attrs['_ARRAY_DIMENSIONS'] = coord_attrs['dimensions']
coord_data = coords_data[coord_name]['data']
if bbox is not None and \
(coord_name == 'lat' or coord_name == 'latitude'):
if coord_data[0] < coord_data[-1]:
lat_min_offset = bisect.bisect_left(coord_data, bbox[1])
lat_max_offset = bisect.bisect_right(coord_data, bbox[3])
else:
lat_min_offset = len(coord_data) - \
bisect.bisect_left(coord_data[::-1], bbox[3])
lat_max_offset = len(coord_data) - \
bisect.bisect_right(coord_data[::-1], bbox[1])
coords_data = self._adjust_coord_data(coord_name,
lat_min_offset,
lat_max_offset,
coords_data,
coord_attrs)
coord_data = coords_data[coord_name]['data']
elif bbox is not None and \
(coord_name == 'lon' or coord_name == 'longitude'):
lon_min_offset = bisect.bisect_left(coord_data, bbox[0])
lon_max_offset = bisect.bisect_right(coord_data, bbox[2])
coords_data = self._adjust_coord_data(coord_name,
lon_min_offset,
lon_max_offset,
coords_data,
coord_attrs)
coord_data = coords_data[coord_name]['data']
elif bbox is not None and \
(coord_name == 'latitude_bounds' or coord_name == 'lat_bounds'
or coord_name == 'latitude_bnds' or coord_name == 'lat_bnds'):
coords_data = self._adjust_coord_data(coord_name,
lat_min_offset,
lat_max_offset,
coords_data,
coord_attrs)
coord_data = coords_data[coord_name]['data']
elif bbox is not None and \
(coord_name == 'longitude_bounds' or coord_name == 'lon_bounds'
or coord_name == 'longitude_bnds' or coord_name == 'lon_bnds'):
coords_data = self._adjust_coord_data(coord_name,
lon_min_offset,
lon_max_offset,
coords_data,
coord_attrs)
coord_data = coords_data[coord_name]['data']
if len(coord_data) > 0:
coord_array = np.array(coord_data)
self._add_static_array(coord_name, coord_array, coord_attrs)
else:
shape = list(coords_data[coord_name].
get('shape', coords_data[coord_name].get('size')))
chunk_size = coords_data[coord_name]['chunkSize']
if not isinstance(chunk_size, List):
chunk_size = [chunk_size]
encoding = self.get_encoding(coord_name)
self._add_remote_array(coord_name, shape, chunk_size,
encoding, coord_attrs)
time_attrs = {
"_ARRAY_DIMENSIONS": ['time'],
"units": "seconds since 1970-01-01T00:00:00Z",
"calendar": "proleptic_gregorian",
"standard_name": "time",
"bounds": "time_bnds",
}
time_bnds_attrs = {
"_ARRAY_DIMENSIONS": ['time', 'bnds'],
"units": "seconds since 1970-01-01T00:00:00Z",
"calendar": "proleptic_gregorian",
"standard_name": "time_bnds",
}
self._add_static_array('time', t_array, time_attrs)
self._add_static_array('time_bnds', t_bnds_array, time_bnds_attrs)
coordinate_names = [coord for coord in coords_data.keys()
if coord not in COMMON_COORD_VAR_NAMES]
coordinate_names = ' '.join(coordinate_names)
global_attrs = dict(
Conventions='CF-1.7',
coordinates=coordinate_names,
title=data_id,
date_created= | pd.Timestamp.now() | pandas.Timestamp.now |
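# A minimal illustration of why a MutableMapping such as RemoteChunkStore can act
# as a Zarr store: Zarr only needs key/value access to metadata and chunk blobs.
# (A plain dict is used here; the key names shown assume the zarr v2 format.)
import numpy as np
import zarr

store = {}                                  # any MutableMapping works
z = zarr.open_array(store, mode="w", shape=(4, 4), chunks=(2, 2), dtype="f8")
z[:] = np.arange(16).reshape(4, 4)
print(sorted(store))                        # ['.zarray', '0.0', '0.1', '1.0', '1.1']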
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
from plateau.utils.pandas import (
aggregate_to_lists,
concat_dataframes,
drop_sorted_duplicates_keep_last,
is_dataframe_sorted,
mask_sorted_duplicates_keep_last,
merge_dataframes_robust,
sort_dataframe,
)
class TestConcatDataframes:
@pytest.fixture(params=[True, False])
def dummy_default(self, request):
if request.param:
return pd.DataFrame(data={"a": [-2, -3], "b": 1.0}, columns=["a", "b"])
else:
return None
@pytest.fixture(params=[True, False])
def maybe_iter(self, request):
if request.param:
return iter
else:
return list
def test_many(self, dummy_default, maybe_iter):
dfs = [
pd.DataFrame(
data={"a": [0, 1], "b": 1.0}, columns=["a", "b"], index=[10, 11]
),
pd.DataFrame(
data={"a": [2, 3], "b": 2.0}, columns=["a", "b"], index=[10, 11]
),
pd.DataFrame(data={"a": [4, 5], "b": 3.0}, columns=["a", "b"]),
]
expected = pd.DataFrame(
{"a": [0, 1, 2, 3, 4, 5], "b": [1.0, 1.0, 2.0, 2.0, 3.0, 3.0]},
columns=["a", "b"],
)
actual = concat_dataframes(maybe_iter(dfs), dummy_default)
pdt.assert_frame_equal(actual, expected)
def test_single(self, dummy_default, maybe_iter):
df = pd.DataFrame(
data={"a": [0, 1], "b": 1.0}, columns=["a", "b"], index=[10, 11]
)
actual = concat_dataframes(maybe_iter([df.copy()]), dummy_default)
| pdt.assert_frame_equal(actual, df) | pandas.testing.assert_frame_equal |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        # ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        # ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([ | Timedelta('-2days') | pandas.tseries.tdi.Timedelta |
#!/usr/bin/env python
ONLINE_RETAIL_XLSX = 'OnlineRetail.xlsx'
ONLINE_RETAIL_CSV = 'OnlineRetail.csv'
ONLINE_RETAIL_JSON = 'OnlineRetail.json'
def download_spreadsheet():
print('Starting download_spreadsheet() ...')
# support python 2 and 3
try:
# python 3
import urllib.request as urlrequest
except ImportError:
import urllib as urlrequest
source_url = "http://archive.ics.uci.edu/ml/machine-learning-databases/00352/Online%20Retail.xlsx"
urlrequest.urlretrieve(source_url, ONLINE_RETAIL_XLSX)
print('Finished download_spreadsheet() ...')
def create_csv():
print('Starting create_csv() ...')
import pandas as pd
import datetime
df = pd.read_excel(ONLINE_RETAIL_XLSX, sheetname='Online Retail')
# remove nan customer IDs
df = df[pd.notnull(df['CustomerID'])]
df['CustomerID'] = df['CustomerID'].astype(int)
# remove negative quantities - this also removes non-numeric InvoiceNo's
df = df.ix[df['Quantity'] > 0]
# Add a line number for each item in an invoice
df['LineNo'] = df.groupby(['InvoiceNo']).cumcount()+1
# the dataset starts at approx 6am and finishes at approx 10pm
# we want to data to span 24 hours
df_AM = df.copy()
df_PM = df.copy()
df_AM['InvoiceNo'] = (df_AM['InvoiceNo'].astype('str') + '1').astype(int)
df_PM['InvoiceNo'] = (df_PM['InvoiceNo'].astype('str') + '2').astype(int)
df_PM['InvoiceDate'] = df_PM['InvoiceDate'] + datetime.timedelta(hours=12)
df = pd.concat([df_AM, df_PM])
# Sort dataframe
df['InvoiceTime'] = | pd.DatetimeIndex(df['InvoiceDate']) | pandas.DatetimeIndex |
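# A tiny illustration of the groupby().cumcount() trick used above to number
# the items within each invoice (the values below are made up):
import pandas as pd

demo = pd.DataFrame({"InvoiceNo": [536365, 536365, 536365, 536366],
                     "StockCode": ["A", "B", "C", "D"]})
demo["LineNo"] = demo.groupby(["InvoiceNo"]).cumcount() + 1
print(demo)   # invoice 536365 gets LineNo 1, 2, 3; invoice 536366 restarts at 1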
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 13:38:13 2020
@author: zhanghai
"""
'''
Input parameters: ticker, interval, test start date, test end date, model name
Output: dataframe with initial deposit, gross profit, gross loss, total net profit, profit factor,
        expected payoff, absolute drawdown, maximal drawdown, relative drawdown,
        profit trades %, loss trades
Entry strategy: -1: close long position and wait;
                 0: keep current position;
                 1: open a long position
Exit strategy: a reverse signal appears, or
               the stop loss (entry - ATR) is reached
Position size control: ATR based; ATR period = 20; ATR multiplier = 1; 2% risk tolerance; nominal value is the average ATR
'''
import pandas as pd
pd.set_option('mode.chained_assignment', None)
import numpy as np
import datetime
import matplotlib.pyplot as plt
import stockstats
import sys
sys.path.append('../')
from uis.calculate_ama import calculate_ama
from data_processing.load_data import load_rawdata
class AMABackTest():
def __init__(self, etf, start_date, end_date, model_name='AMA',initial_deposit=100000, price_type='open',er_window = 9, slow_window = 20, fast_window = 4):
self.ticker = etf
self.start_date = start_date
self.end_date = end_date
self.model_name = model_name
self.init_deposit = initial_deposit
self.price_type = price_type
self.er_window = er_window
self.slow_window = slow_window
self.fast_window = fast_window
self.raw_data = load_rawdata(etf, 'weekly')
print("Self.raw",self.raw_data)
self.indicators = stockstats.StockDataFrame.retype(self.raw_data.copy())
ama, _, _ = calculate_ama(self.raw_data, self.indicators, self.price_type, self.er_window, self.slow_window, self.fast_window)
self.raw_data['ama'] = ama
self.report = pd.DataFrame(columns=['position size','total','profit'])
def predict(self,cur_date):
if self.raw_data.loc[cur_date]['open'] > self.raw_data.loc[cur_date]['ama'] and self.raw_data.loc[cur_date]['close'] > \
self.raw_data.loc[cur_date]['ama']:
signal = 1
elif self.raw_data.loc[cur_date]['open'] < self.raw_data.loc[cur_date]['ama'] and self.raw_data.loc[cur_date]['close'] < \
self.raw_data.loc[cur_date]['ama']:
signal = -1
else:
signal = 0
return signal
def up_action(self,cur_date):
if self.position == 'empty':
self.entry_price = self.raw_data['close'][cur_date]
self.entyr_atr = self.indicators['atr'].loc[cur_date]
self.position_size = int(self.total_value*0.05/self.entyr_atr)
if self.position_size*self.entry_price > self.total_value:
self.position_size = int(self.total_value/self.entry_price)
df = pd.DataFrame({'position size':self.position_size,'total':self.total_value,'profit':0},index=[cur_date])
self.report = self.report.append(df)
self.position = 'long'
else:
stop_price = self.entry_price - 0.1*self.entyr_atr
target_price = self.entry_price * 1.3
if self.raw_data['low'][cur_date] < stop_price < self.raw_data['high'][cur_date]:
#sell at stop price
profit = round((self.raw_data['high'][cur_date] - self.entry_price)*self.position_size, 2)
self.total_value += profit
df = pd.DataFrame({'position size':0,'total':self.total_value,'profit':profit},index=[cur_date])
self.report = self.report.append(df)
self.position = 'empty'
elif self.raw_data['low'][cur_date] < target_price < self.raw_data['high'][cur_date]:
#sell at target price
profit = round((self.raw_data['low'][cur_date]-self.entry_price)*self.position_size, 2)
self.total_value += profit
df = pd.DataFrame({'position size':0,'total':self.total_value,'profit':profit},index=[cur_date])
self.report = self.report.append(df)
self.position = 'empty'
else:
#hold the position
profit = round((self.raw_data['close'][cur_date]-self.entry_price)*self.position_size, 2)
current_value = self.total_value + profit
df = pd.DataFrame({'position size':self.position_size,'total':current_value,'profit':0},index=[cur_date])
self.report = self.report.append(df)
def down_action(self,cur_date):
if self.position == 'long':
#sell long position
sell_price = self.raw_data['close'][cur_date]
profit = round((sell_price-self.entry_price)*self.position_size, 2)
self.total_value += profit
df = pd.DataFrame({'position size':0,'total':self.total_value,'profit':profit},index=[cur_date])
self.report = self.report.append(df)
self.position = 'empty'
else:
#wait
df = pd.DataFrame({'position size':0,'total':self.total_value,'profit':0},index=[cur_date])
self.report = self.report.append(df)
def sideway_action(self,cur_date):
if self.position == 'long':
#maintain long position
stop_price = self.entry_price - 0.1 * self.entyr_atr
target_price = self.entry_price * 1.3
if self.raw_data['low'][cur_date] < stop_price < self.raw_data['high'][cur_date]:
#sell at stop price
profit = round((self.raw_data['high'][cur_date] - self.entry_price)*self.position_size, 2)
self.total_value += profit
df = | pd.DataFrame({'position size':0,'total':self.total_value,'profit':profit},index=[cur_date]) | pandas.DataFrame |
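# A minimal sketch of the ATR-based position sizing used in up_action above
# (all numbers are illustrative, not taken from any real ETF data):
total_value = 100_000
entry_price = 50.0
entry_atr = 2.5                                          # 20-period ATR at entry
position_size = int(total_value * 0.05 / entry_atr)      # ATR-scaled risk budget
if position_size * entry_price > total_value:            # never exceed the account value
    position_size = int(total_value / entry_price)
print(position_size)                                     # 2000 shares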
import enum
import numpy as np
import pandas as pd
import pytest
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Float, Enum
from cascade.dismod.db.wrapper import DismodFile, _get_engine, _validate_data
from cascade.dismod.db import DismodFileError
@pytest.fixture
def engine():
return _get_engine(None)
@pytest.fixture
def base_file(engine):
dm_file = DismodFile(engine, {"howdy": float}, {"there": int})
dm_file.make_densities()
ages = pd.DataFrame({"age": np.array([6.0, 22.0, 48.0])})
dm_file.age = ages
dm_file.time = pd.DataFrame({"time": [1997.0, 2005.0, 2017.0]})
dm_file.integrand = pd.DataFrame({"integrand_name": ["prevalence"]})
return dm_file
def test_wrong_type(base_file):
ages = pd.DataFrame({"age": np.array(["strings", "for", "ages"])})
base_file.age = ages
with pytest.raises(DismodFileError):
base_file.flush()
def test_is_dirty__initially(base_file):
assert base_file._is_dirty("age")
def test_is_dirty__after_flush(base_file):
base_file.flush()
assert not base_file._is_dirty("age")
def test_is_dirty__after_modification(base_file):
base_file.flush()
base_file.age.loc[0, "age"] *= 2
assert base_file._is_dirty("age")
def test_is_dirty__on_read(base_file, engine):
base_file.flush()
dm_file2 = DismodFile(engine, {"howdy": float}, {"there": int})
dm_file2.age
assert not dm_file2._is_dirty("age")
def test_is_dirty__not_yet_read(base_file):
assert not base_file._is_dirty("foo_bar")
def test_dmfile_read(base_file, engine):
ages = base_file.age
times = base_file.time
base_file.flush()
dm_file2 = DismodFile(engine, {"howdy": float}, {"there": int})
assert ages.equals(dm_file2.age)
assert times.equals(dm_file2.time)
def test_reading_modified_columns(base_file, engine):
base_file.flush()
base_file.age.loc[0, "age"] *= 2
ages = base_file.age.copy()
base_file.flush()
dm_file2 = DismodFile(engine, {"howdy": float}, {"there": int})
assert ages.equals(dm_file2.age)
DummyBase = declarative_base()
class DummyTable(DummyBase):
__tablename__ = "test_table"
primary_key_column = Column(Integer(), primary_key=True)
integer_column = Column(Integer())
float_column = Column(Float())
nonnullable_column = Column(Integer(), nullable=False)
string_column = Column(String())
enum_column = Column(Enum(enum.Enum("Bee", "bumble honey carpenter wool_carder")))
def test_validate_data__happy_path():
data = pd.DataFrame(
{
"integer_column": [1, 2, 3],
"float_column": [1.0, 2.0, 3.0],
"string_column": ["a", "b", "c"],
"enum_column": ["bumble", "honey", "carpenter"],
"nonnullable_column": [1, 2, 3],
}
)
_validate_data(DummyTable.__table__, data)
def test_validate_data__bad_integer():
data = pd.DataFrame({"integer_column": [1.0, 2.0, 3.0], "nonnullable_column": [1, 2, 3]})
with pytest.raises(DismodFileError) as excinfo:
_validate_data(DummyTable.__table__, data)
assert "integer_column" in str(excinfo.value)
def test_validate_data__bad_float():
data = pd.DataFrame({"float_column": ["1.0", "2.0", "3.0"], "nonnullable_column": [1, 2, 3]})
with pytest.raises(DismodFileError) as excinfo:
_validate_data(DummyTable.__table__, data)
assert "float_column" in str(excinfo.value)
def test_validate_data__bad_string():
data = pd.DataFrame({"string_column": [1, 2, 3], "nonnullable_column": [1, 2, 3]})
with pytest.raises(DismodFileError) as excinfo:
_validate_data(DummyTable.__table__, data)
assert "string_column" in str(excinfo.value)
def test_validate_data__bad_enum():
data = | pd.DataFrame({"enum_column": [1, 2, 3], "nonnullable_column": [1, 2, 3]}) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pandas as pd
import sys
from pyfaidx import Faidx
from clonesig.data_loader import PAT_LIST, DataWriter
import pathlib
from scipy.stats import beta
import numpy as np
tumor = sys.argv[1]
seq_depth = sys.argv[2]
"""
tumor = "T2"
seq_depth = "8X"
#sed 's/^\#\#/\&/g' data/salcedo_dream_challenge/MuTect_inputs/mutect_filtered_T2.T.16XnoXY.vcf > data/salcedo_dream_challenge/MuTect_inputs/mutect_filtered_T2.T.16XnoXY_clean.vcf
"""
np.random.seed(int(seq_depth[:-1])*int(tumor[1]))
def get_context(x):
match_dict = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
if x['REF'] in ('C', 'T'):
context = x['triplet'][0] + '[' + x['REF'] + '>' + x['ALT'] + ']' + \
x['triplet'][2]
else:
context = match_dict[x['triplet'][2]] + '[' + match_dict[x['REF']] +\
'>' + match_dict[x['ALT']] + ']' + match_dict[x['triplet'][0]]
return context
vcf_filename = 'data/salcedo_dream_challenge/MuTect_inputs/mutect_filtered_{}.T.{}noXY_clean.vcf'.format(tumor, seq_depth)
cnv_filename = 'data/salcedo_dream_challenge/MuTect_inputs/{}-{}_refit_subclones_noXY.txt'.format(tumor, seq_depth)
purity_filename = 'data/salcedo_dream_challenge/MuTect_inputs/{}-{}_refit_cellularity_ploidy.txt'.format(tumor, seq_depth)
ref_fasta = 'external_data/hs37d5.fa'
fa = Faidx(ref_fasta)
# downloaded from ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/phase2_reference_assembly_sequence
vcf_data = | pd.read_csv(vcf_filename, sep='\t', comment='&') | pandas.read_csv |
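# A worked example of the trinucleotide-context convention implemented by
# get_context above: when REF is a purine (A/G) the triplet is reverse
# complemented, so every context is reported on the pyrimidine strand.
# (The REF/ALT/triplet values below are hypothetical.)
row = {"REF": "G", "ALT": "A", "triplet": "TGC"}
match_dict = {"A": "T", "C": "G", "G": "C", "T": "A"}
context = (match_dict[row["triplet"][2]] + "[" + match_dict[row["REF"]] + ">"
           + match_dict[row["ALT"]] + "]" + match_dict[row["triplet"][0]])
print(context)   # G[C>T]A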
from linearmodels.compat.statsmodels import Summary
import warnings
import numpy as np
from numpy.linalg import pinv
from numpy.testing import assert_allclose, assert_equal
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
import scipy.linalg
from statsmodels.tools.tools import add_constant
from linearmodels.datasets import card
from linearmodels.iv import IV2SLS, IVGMM, IVGMMCUE, IVLIML
from linearmodels.iv.model import _OLS
from linearmodels.iv.results import compare
from linearmodels.shared.hypotheses import WaldTestStatistic
from linearmodels.shared.utility import AttrDict
@pytest.fixture(scope="module")
def data():
n, q, k, p = 1000, 2, 5, 3
rs = np.random.RandomState(12345)
clusters = rs.randint(0, 10, n)
rho = 0.5
r = scipy.linalg.toeplitz([1] + (rho + np.linspace(0.1, -0.1, 8)).tolist())
r[-1, 2:] = 0
r[2:, -1] = 0
r[-1, -1] = 1
v = rs.multivariate_normal(np.zeros(r.shape[0]), r, n)
x = v[:, :k]
z = v[:, k : k + p]
e = v[:, [-1]]
params = np.arange(1, k + 1) / k
params = params[:, None]
y = x @ params + e
exog_instr = np.column_stack((x[:, q:], z))
xhat = exog_instr @ np.linalg.pinv(exog_instr) @ x
nobs, nvar = x.shape
s2 = e.T @ e / nobs
s2_debiased = e.T @ e / (nobs - nvar)
v = xhat.T @ xhat / nobs
vinv = np.linalg.inv(v)
kappa = 0.99
vk = (x.T @ x * (1 - kappa) + kappa * xhat.T @ xhat) / nobs
return AttrDict(
nobs=nobs,
e=e,
x=x,
y=y,
z=z,
xhat=xhat,
params=params,
s2=s2,
s2_debiased=s2_debiased,
clusters=clusters,
nvar=nvar,
v=v,
vinv=vinv,
vk=vk,
kappa=kappa,
dep=y,
exog=x[:, q:],
endog=x[:, :q],
instr=z,
)
def get_all(v):
attr = [d for d in dir(v) if not d.startswith("_")]
for a in attr:
val = getattr(v, a)
if a in ("conf_int", "durbin", "wu_hausman", "c_stat"):
val()
def test_rank_deficient_exog_exception(data):
exog = data.exog.copy()
exog[:, :2] = 1
with pytest.raises(ValueError):
IV2SLS(data.dep, exog, data.endog, data.instr)
def test_rank_deficient_endog_exception(data):
endog = data.endog.copy()
endog[:, :2] = 1
with pytest.raises(ValueError):
IV2SLS(data.dep, data.exog, endog, data.instr)
with pytest.raises(ValueError):
IV2SLS(data.dep, data.exog, data.exog, data.instr)
def test_invalid_weights_exception(data):
weights = np.zeros_like(data.dep)
with pytest.raises(ValueError):
IV2SLS(data.dep, data.exog, data.endog, data.instr, weights=weights)
def test_rank_deficient_instr_exception(data):
instr = data.instr.copy()
instr[:, :2] = 1
with pytest.raises(ValueError):
IV2SLS(data.dep, data.exog, data.endog, instr)
with pytest.raises(ValueError):
IV2SLS(data.dep, data.exog, data.endog, data.exog)
def test_kappa_error_exception(data):
with pytest.raises(ValueError):
IVLIML(data.dep, data.exog, data.endog, data.instr, kappa=np.array([1]))
def test_fuller_error_exception(data):
with pytest.raises(ValueError):
IVLIML(data.dep, data.exog, data.endog, data.instr, fuller=np.array([1]))
def test_kappa_fuller_warning_exception(data):
with warnings.catch_warnings(record=True) as w:
IVLIML(data.dep, data.exog, data.endog, data.instr, kappa=0.99, fuller=1)
assert len(w) == 1
def test_string_cat_exception(data):
instr = data.instr.copy()
n = data.instr.shape[0]
cat = pd.Series(["a"] * (n // 2) + ["b"] * (n // 2))
instr = pd.DataFrame(instr)
instr["cat"] = cat
res = IV2SLS(data.dep, data.exog, data.endog, instr).fit(cov_type="unadjusted")
instr["cat"] = cat.astype("category")
res_cat = IV2SLS(data.dep, data.exog, data.endog, instr).fit(cov_type="unadjusted")
| assert_series_equal(res.params, res_cat.params) | pandas.testing.assert_series_equal |
#!/usr/bin/env python3
import glob
import os
import pprint
import traceback
import pandas as pd
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
# Extraction function
def tflog2pandas(path: str) -> pd.DataFrame:
"""convert single tensorflow log file to pandas DataFrame
Parameters
----------
path : str
path to tensorflow log file
Returns
-------
pd.DataFrame
converted dataframe
"""
DEFAULT_SIZE_GUIDANCE = {
"compressedHistograms": 1,
"images": 1,
"scalars": 0, # 0 means load all
"histograms": 1,
}
runlog_data = pd.DataFrame({"metric": [], "value": [], "step": []})
try:
event_acc = EventAccumulator(path, DEFAULT_SIZE_GUIDANCE)
event_acc.Reload()
tags = event_acc.Tags()["scalars"]
event_list = event_acc.Scalars('Eval_Reward/Mean')
values = list(map(lambda x: x.value, event_list))
step = list(map(lambda x: x.step, event_list))
r = {"metric": ['Eval_Reward/Mean'] * len(step), "value": values, "step": step}
r = | pd.DataFrame(r) | pandas.DataFrame |
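# Hypothetical usage of the converter above (the event-file path is made up;
# any TensorBoard run directory containing a tfevents file would do):
# df = tflog2pandas("runs/experiment_1/events.out.tfevents.1600000000.host")
# df.to_csv("eval_reward_mean.csv", index=False)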
import pandas as pd
import numpy as np
from tkinter import *
from tkinter import filedialog
# Importing Chen Values
chen_67_to_69 = pd.read_csv('chcof1.id', index_col=0)
chen_70_to_74 = | pd.read_csv('chcof2.id', index_col=0) | pandas.read_csv |
from __future__ import division
# import libraries
from datetime import datetime, timedelta
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import plotly as py
import plotly.offline as pyoff
import plotly.graph_objs as go
# initiate Plotly
pyoff.init_notebook_mode()  # generates offline graphs in a Jupyter notebook
#load our data from CSV
# tx_data = pd.read_csv("data.csv")
tx_data = pd.read_csv('data.csv', header=0, encoding='unicode_escape')  # read the CSV; unicode_escape also handles non-ASCII characters
tx_data.head(10)  # preview the first 10 rows
#convert the string date field to datetime
tx_data['InvoiceDate'] = | pd.to_datetime(tx_data['InvoiceDate']) | pandas.to_datetime |
# Calculate the percentage difference compared to x days earlier
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sn
import platform
import datetime
import datetime as dt
import streamlit as st
from streamlit import caching
from helpers import * # cell_background, select_period, save_df, drop_columns
from datetime import datetime
from matplotlib.backends.backend_agg import RendererAgg
_lock = RendererAgg.lock
def week_to_week(df, column_):
column_ = column_ if type(column_) == list else [column_]
newcolumns = []
newcolumns2 = []
for c in column_:
newname = str(c) + "_weekdiff"
newname2 = str(c) + "_weekdiff_index"
newcolumns.append(newname)
newcolumns2.append(newname2)
df[newname] = np.nan
df[newname2] = np.nan
for n in range(7, len(df)):
vorige_week = df.iloc[n - 7][c]
nu = df.iloc[n][c]
waarde = round((((nu - vorige_week) / vorige_week) * 100), 2)
waarde2 = round((((nu) / vorige_week) * 100), 2)
df.at[n, newname] = waarde
df.at[n, newname2] = waarde2
return df, newcolumns, newcolumns2
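# Illustration of what week_to_week returns, on synthetic data (the column
# name "cases" is made up for the example):
#   demo = pd.DataFrame({"cases": [100] * 7 + [120] * 7})
#   demo, diff_cols, index_cols = week_to_week(demo, "cases")
#   demo["cases_weekdiff"].iloc[7:]        -> 20.0  (percent change vs. 7 rows earlier)
#   demo["cases_weekdiff_index"].iloc[7:]  -> 120.0 (index, previous week = 100)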
@st.cache(ttl=60 * 60 * 24)
def get_data_casus_landelijk():
if platform.processor() != "":
url1 = "C:\\Users\\rcxsm\\Documents\\phyton_scripts\\covid19_seir_models\\input\\COVID-19_casus_landelijk.csv"
url1 = "C:/Users/rcxsm/Documents/phyton_scripts/covid19_seir_models/input/COVID-19_casus_landelijk.csv"
else:
url1= "https://data.rivm.nl/covid-19/COVID-19_casus_landelijk.csv"
df = pd.read_csv(url1, delimiter=";", low_memory=False)
df["Date_statistics"] = pd.to_datetime(df["Date_statistics"], format="%Y-%m-%d")
df = df.groupby(["Date_statistics", "Agegroup"], sort=True).count().reset_index()
return df
def select_period(df, field, show_from, show_until):
"""Shows two inputfields (from/until and Select a period in a df (helpers.py).
Args:
df (df): dataframe
field (string): Field containing the date
Returns:
df: filtered dataframe
"""
if show_from is None:
show_from = "2021-1-1"
if show_until is None:
show_until = "2030-1-1"
#"Date_statistics"
mask = (df[field].dt.date >= show_from) & (df[field].dt.date <= show_until)
df = df.loc[mask]
df = df.reset_index()
return df
def calculate_fraction(df):
nr_of_columns = len (df.columns)
nr_of_rows = len(df)
column_list = df.columns.tolist()
max_waarde = 0
data = []
waardes = []
# 0-9 10-19 20-29 30-39 40-49 50-59 60-69 70-79 80-89 90+
pop_ = [1756000, 1980000, 2245000, 2176000, 2164000, 2548000, 2141000, 1615000, 709000, 130000] # tot 17 464 000
fraction = [0.10055, 0.11338, 0.12855, 0.12460, 0.12391, 0.14590, 0.12260, 0.09248, 0.0405978, 0.0074438846]
for r in range(nr_of_rows):
row_data = []
for c in range(0,nr_of_columns):
if c==0 :
row_data.append( df.iat[r,c])
else:
#try
waarde = df.iat[r,c]/pop_[c-1] * 100_000
row_data.append(waarde)
waardes.append(waarde)
if waarde > max_waarde:
max_waarde = waarde
#except:
# date
# row_data.append( df.iat[r,c])
# pass
data.append(row_data)
df_fractie = pd.DataFrame(data, columns=column_list)
top_waarde = 0.975*max_waarde
return df_fractie, top_waarde
def save_df(df, name):
""" _ _ _ """
OUTPUT_DIR = (
"C:\\Users\\rcxsm\\Documents\\phyton_scripts\\covid19_seir_models\\output\\"
)
name_ = OUTPUT_DIR + name + ".csv"
compression_opts = dict(method=None, archive_name=name_)
df.to_csv(name_, index=False, compression=compression_opts)
print("--- Saving " + name_ + " ---")
def smooth(df, columnlist):
columnlist_sma_df = []
columnlist_df= []
columnlist_names= []
columnlist_ages = []
# 0-9 10-19 20-29 30-39 40-49 50-59 60-69 70-79 80+
#pop_ = [1756000, 1980000, 2245000, 2176000, 2164000, 2548000, 2141000, 1615000, 839000]
#fraction = [0.10055, 0.11338, 0.12855, 0.12460, 0.12391, 0.14590, 0.12260, 0.09248, 0.04804]
for c in columnlist:
new_column = c + "_SMA"
#new_column = c
# print("Generating " + new_column + "...")
df[new_column] = (
df.iloc[:, df.columns.get_loc(c)].rolling(window=WDW2, center=True).mean()
)
columnlist_sma_df.append(df[new_column])
columnlist_df.append(df[c])
columnlist_names.append(new_column)
        columnlist_ages.append(c) # only the ages, for the legend
return df,columnlist_df, columnlist_sma_df,columnlist_names,columnlist_ages, columnlist
def drop_columns(df, what_to_drop):
    """ drop columns. what_to_drop : a column name or a list of column names """
    if what_to_drop != None:
        what_to_drop = what_to_drop if type(what_to_drop) == list else [what_to_drop]
        print("dropping " + str(what_to_drop))
        for d in what_to_drop:
            df = df.drop(columns=[d], axis=1)
    return df
def convert(list):
return tuple(list)
def make_age_graph(df, d, columns_original, legendanames, titel):
if d is None:
st.warning("Choose ages to show")
st.stop()
with _lock:
color_list = [ "#3e5c76", # blue 6,
"#ff6666", # reddish 0
"#ac80a0", # purple 1
"#3fa34d", # green 2
"#EAD94C", # yellow 3
"#EFA00B", # orange 4
"#7b2d26", # red 5
"#e49273" , # dark salmon 7
"#1D2D44", # 8
"#02A6A8",
"#4E9148",
"#F05225",
"#024754",
"#FBAA27",
"#302823",
"#F07826",
]
# df = agg_ages(df)
fig1y, ax = plt.subplots()
for i, d_ in enumerate(d):
#if d_ == "TOTAAL_index":
if d_[:6] == "TOTAAL":
ax.plot(df["Date_of_statistics_week_start"], df[d_], color = color_list[0], label = columns_original[i], linestyle="--", linewidth=2)
ax.plot(df["Date_of_statistics_week_start"], df[columns_original[i]], color = color_list[0], alpha =0.5, linestyle="dotted", label = '_nolegend_', linewidth=2)
else:
ax.plot(df["Date_of_statistics_week_start"], df[d_], color = color_list[i+1], label = columns_original[i])
ax.plot(df["Date_of_statistics_week_start"], df[columns_original[i]], color = color_list[i+1], alpha =0.5, linestyle="dotted", label = '_nolegend_' )
plt.legend()
if y_zero == True:
ax.set_ylim(bottom = 0)
titel_ = titel + " (weekcijfers)"
plt.title(titel_)
plt.xticks(rotation=270)
ax.text(
1,
1.1,
"Created by <NAME> — @rcsmit",
transform=ax.transAxes,
fontsize="xx-small",
va="top",
ha="right",
)
# plt.tight_layout()
# plt.show()
st.pyplot(fig1y)
def show_age_graph (df,d, titel):
df, columnlist_df, columnlist_sma_df, columnlist_sma, columnlist_ages_legenda, columnlist_original = smooth(df, d)
make_age_graph(df, columnlist_sma, columnlist_original, columnlist_ages_legenda, titel)
def agg_ages(df):
# make age groups
df["0-29"] = df["0-14"] + df["15-19"] + df["20-24"] + df["25-29"]
df["30-49"] = df["30-34"] + df["35-39"] + df["40-44"] + df["45-49"]
df["50-69"] = df["50-54"] + df["55-59"] + df["60-64"] + df["65-69"]
df["70-89"] = df["70-74"] + df["75-79"] + df["80-84"] + df["85-89"]
    # # extra group
df["30-69"] = df["30-34"] + df["35-39"] + df["40-44"] + df["45-49"] + df["50-54"] + df["55-59"] + df["60-64"] + df["65-69"]
    # # RIVM grouping
df["0-39"] = df["0-14"] + df["15-19"] + df["20-24"] + df["25-29"] + df["30-34"] + df["35-39"]
df["40-59"] = df["40-44"] + df["45-49"] + df["50-54"] + df["55-59"]
df["60-79"] = df["60-64"] + df["65-69"] + df["70-74"] + df["75-79"]
df["80+"] = df["80-84"] + df["85-89"] + df["90+"]
    # CORRESPONDING WITH CASUS LANDELIJK
    # RIVM grouping
df["0-19"] = df["0-14"] + df["15-19"]
df["20-29"] = df["20-24"] + df["25-29"]
df["30-39"] = df["30-34"] + df["35-39"]
df["40-49"] = df["40-44"] + df["45-49"]
df["50-59"] = df["50-54"] + df["55-59"]
df["60-69"] = df["60-64"] + df["65-69"]
df["70-79"] = df["70-74"] + df["75-79"]
df["80-89"] = df["80-84"] + df["85-89"]
df["TOTAAL"] = df["0-14"] + df["15-19"] + df["20-24"] + df["25-29"] + df["30-34"] + df["35-39"] + df["40-44"] + df["45-49"] + df["50-54"] + df["55-59"] + df["60-64"] + df["65-69"] + df["70-74"] + df["75-79"] + df["80-84"] + df["85-89"] + df["90+"]+ df["Unknown"]
return df
@st.cache(ttl=60 * 60 * 24)
def load_data():
url1 = "https://data.rivm.nl/covid-19/COVID-19_ziekenhuis_ic_opnames_per_leeftijdsgroep.csv"
return pd.read_csv(url1, delimiter=";", low_memory=False)
def prepare_data():
#url1 = "C:\\Users\\rcxsm\\Documents\\phyton_scripts\\covid19_seir_models\\input\\COVID-19_ziekenhuis_ic_opnames_per_leeftijdsgroep.csv"
df_getdata = load_data()
df = df_getdata.copy(deep=False) # prevent an error [Return value of `prepare_data()` was mutated between runs.]
datumveld = "Date_of_statistics_week_start"
df[datumveld] = | pd.to_datetime(df[datumveld], format="%Y-%m-%d") | pandas.to_datetime |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
NWChem Output
#######################
Parse NWChem output files and convert them into an exatomic Universe container.
"""
import six
from os import sep, path
import numpy as np
import pandas as pd
from six import StringIO
from collections import defaultdict
from exa import TypedMeta
from exa.util.units import Length
from exatomic.core.frame import compute_frame_from_atom
from exatomic.algorithms.numerical import _square_indices
from exatomic.algorithms.basis import lmap
from exatomic.core.frame import Frame
from exatomic.core.atom import Atom
from exatomic.core.basis import BasisSet, BasisSetOrder
from exatomic.core.orbital import Orbital, MOMatrix
from .editor import Editor
from .basis import cartesian_ordering_function, spherical_ordering_function
class OutMeta(TypedMeta):
atom = Atom
orbital = Orbital
momatrix = MOMatrix
basis_set = BasisSet
basis_set_order = BasisSetOrder
frame = Frame
class Output(six.with_metaclass(OutMeta, Editor)):
"""Editor for NWChem calculation output file (stdout)."""
def parse_atom(self):
"""Parse the atom dataframe."""
_reatom01 = 'Geometry "'
_reatom02 = 'Atomic Mass'
_reatom03 = 'ECP "ecp basis"'
_reatom04 = 'Output coordinates in'
found = self.find(_reatom01, _reatom02,
_reatom03, _reatom04, keys_only=True)
unit = self[found[_reatom04][0]].split()[3]
unit = "Angstrom" if unit == "angstroms" else "au"
starts = np.array(found[_reatom01]) + 7
stops = np.array(found[_reatom02]) - 1
ecps = np.array(found[_reatom03]) + 2
ecps = {self[ln].split()[0]: int(self[ln].split()[3]) for ln in ecps}
columns = ['label', 'tag', 'Z', 'x', 'y', 'z']
atom = pd.concat([self.pandas_dataframe(s, e, columns)
for s, e in zip(starts, stops)])
atom['symbol'] = atom['tag'].str.extract('([A-z]{1,})([0-9]*)',
expand=False)[0].str.lower().str.title()
atom['Z'] = atom['Z'].astype(np.int64)
atom['Zeff'] = (atom['Z'] - atom['tag'].map(ecps).fillna(value=0)).astype(np.int64)
#n = len(atom)
nf = atom.label.value_counts().max()
nat = atom.label.max()
atom['frame'] = [i for i in range(nf) for j in range(nat)]
atom['label'] -= 1
atom['x'] *= Length[unit, 'au']
atom['y'] *= Length[unit, 'au']
atom['z'] *= Length[unit, 'au']
if atom['frame'].max() > 0:
li = atom['frame'].max()
atom = atom[~(atom['frame'] == li)]
atom.reset_index(drop=True, inplace=True)
del atom['label']
self.atom = Atom(atom)
def parse_orbital(self):
"""Parse the :class:`~exatomic.core.orbital.Orbital` dataframe."""
orbital = None
_remo01 = 'Molecular Orbital Analysis'
_remo02 = 'alpha - beta orbital overlaps'
_remo03 = 'center of mass'
check = self.find(_remo01)
if any(['Alpha' in value for value in check]):
alpha_starts = np.array([no for no, line in check if 'Alpha' in line], dtype=np.int64) + 2
alpha_stops = np.array([no for no, line in check if 'Beta' in line], dtype=np.int64) - 1
beta_starts = alpha_stops + 3
beta_stops = np.array(self.find(_remo02, keys_only=True), dtype=np.int64) - 1
alpha_orbital = self._parse_orbital(alpha_starts, alpha_stops)
beta_orbital = self._parse_orbital(beta_starts, beta_stops)
alpha_orbital['spin'] = 0
beta_orbital['spin'] = 1
orbital = | pd.concat((alpha_orbital, beta_orbital), ignore_index=True) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 11:40:16 2017
@author: tobias
"""
import os
import re
import glob
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
# Get data for axes
contig_input_file = '/Users/tobias/Desktop/target_contigs/match_table.txt'
workdir = '/'.join(contig_input_file.split('/')[:-1])
contig_matrix = | pd.read_csv(contig_input_file,sep='\t',index_col=0) | pandas.read_csv |
from __future__ import division
import pandas as pd
import numpy as np
# In[2]:
import gc
import subprocess
from ImageDataGenerator import *
import os
import pickle
from keras.models import Model
from keras.layers import Dense, Dropout, Input
from keras.optimizers import Adam, Nadam
from sklearn.model_selection import KFold
import tensorflow as tf
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger
from keras.backend.tensorflow_backend import set_session
from keras.preprocessing.sequence import pad_sequences
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.layers import Input, Dropout, Dense, concatenate, CuDNNGRU, Embedding, Flatten, Activation, BatchNormalization, PReLU
from keras.initializers import he_uniform, RandomNormal
from keras.layers import Conv1D, SpatialDropout1D, Bidirectional, Reshape, Dot, GaussianDropout
from keras.layers import GlobalMaxPooling1D, GlobalAveragePooling1D, Conv2D, MaxPooling2D, GlobalAveragePooling2D
from sklearn.preprocessing import MinMaxScaler
import keras.backend as K
from tqdm import tqdm
from nltk import ngrams
from sklearn.preprocessing import LabelEncoder
from utility import *
from sklearn.metrics import mean_squared_error
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
# restrict gpu usage
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
set_session(sess)
################################# Load Data Stage ########################################
train_dir = '../input/train_jpg/data/competition_files/train_jpg/'
test_dir = '../input/test_jpg/data/competition_files/test_jpg/'
test = pd.read_csv('../input/test.csv.zip', parse_dates=["activation_date"])
train = pd.read_csv('../input/train.csv.zip', parse_dates=["activation_date"])
gp = pd.read_csv('../input/aggregated_features.csv')
train = train.merge(gp, on='user_id', how='left')
test = test.merge(gp, on='user_id', how='left')
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_blurinesses = x['blurinesses']
train_dullnesses = x['dullnesses']
train_whitenesses = x['whitenesses']
train_average_pixel_width = x['average_pixel_width']
train_widths = x['widths']
train_heights = x['heights']
train_ids = x['ids']
del x
gc.collect()
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_blurinesses = x['blurinesses']
test_dullnesses = x['dullnesses']
test_whitenesses = x['whitenesses']
test_average_pixel_width = x['average_pixel_width']
test_widths = x['widths']
test_heights = x['heights']
test_ids = x['ids']
del x
gc.collect()
incep_train_image_df = pd.DataFrame(train_blurinesses, columns = ['blurinesses'])
incep_test_image_df = pd.DataFrame(test_blurinesses, columns = [f'blurinesses'])
incep_train_image_df['image'] = train_ids
incep_test_image_df['image'] = test_ids
train = train.join(incep_train_image_df.set_index('image'), on='image')
test = test.join(incep_test_image_df.set_index('image'), on='image')
incep_train_image_df = pd.DataFrame(train_dullnesses, columns = ['dullnesses'])
incep_test_image_df = pd.DataFrame(test_dullnesses, columns = [f'dullnesses'])
incep_train_image_df['image'] = train_ids
incep_test_image_df['image'] = test_ids
train = train.join(incep_train_image_df.set_index('image'), on='image')
test = test.join(incep_test_image_df.set_index('image'), on='image')
incep_train_image_df = pd.DataFrame(train_whitenesses, columns = ['whitenesses'])
incep_test_image_df = pd.DataFrame(test_whitenesses, columns = [f'whitenesses'])
incep_train_image_df['image'] = train_ids
incep_test_image_df['image'] = test_ids
train = train.join(incep_train_image_df.set_index('image'), on='image')
test = test.join(incep_test_image_df.set_index('image'), on='image')
incep_train_image_df = pd.DataFrame(train_widths, columns = ['widths'])
incep_test_image_df = pd.DataFrame(test_widths, columns = [f'widths'])
incep_train_image_df['image'] = train_ids
incep_test_image_df['image'] = test_ids
train = train.join(incep_train_image_df.set_index('image'), on='image')
test = test.join(incep_test_image_df.set_index('image'), on='image')
incep_train_image_df = pd.DataFrame(train_heights, columns = ['heights'])
incep_test_image_df = pd.DataFrame(test_heights, columns = [f'heights'])
incep_train_image_df['image'] = train_ids
incep_test_image_df['image'] = test_ids
train = train.join(incep_train_image_df.set_index('image'), on='image')
test = test.join(incep_test_image_df.set_index('image'), on='image')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_pixel_width = x['average_pixel_width']
train_average_reds = x['average_reds']
train_average_greens = x['average_greens']
train_average_blues = x['average_blues']
train_ids = x['ids']
del x
gc.collect()
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_pixel_width = x['average_pixel_width']
test_average_reds = x['average_reds']
test_average_greens = x['average_greens']
test_average_blues = x['average_blues']
test_ids = x['ids']
del x
gc.collect()
incep_train_image_df = pd.DataFrame(train_average_pixel_width, columns = ['average_pixel_width'])
incep_test_image_df = pd.DataFrame(test_average_pixel_width, columns = [f'average_pixel_width'])
incep_train_image_df['image'] = train_ids
incep_test_image_df['image'] = test_ids
train = train.join(incep_train_image_df.set_index('image'), on='image')
test = test.join(incep_test_image_df.set_index('image'), on='image')
incep_train_image_df = pd.DataFrame(train_average_reds, columns = ['average_reds'])
incep_test_image_df = pd.DataFrame(test_average_reds, columns = [f'average_reds'])
incep_train_image_df['image'] = train_ids
incep_test_image_df['image'] = test_ids
train = train.join(incep_train_image_df.set_index('image'), on='image')
test = test.join(incep_test_image_df.set_index('image'), on='image')
incep_train_image_df = pd.DataFrame(train_average_blues, columns = ['average_blues'])
incep_test_image_df = pd.DataFrame(test_average_blues, columns = [f'average_blues'])
incep_train_image_df['image'] = train_ids
incep_test_image_df['image'] = test_ids
train = train.join(incep_train_image_df.set_index('image'), on='image')
test = test.join(incep_test_image_df.set_index('image'), on='image')
incep_train_image_df = pd.DataFrame(train_average_greens, columns = ['average_greens'])
incep_test_image_df = pd.DataFrame(test_average_greens, columns = [f'average_greens'])
incep_train_image_df['image'] = train_ids
incep_test_image_df['image'] = test_ids
train = train.join(incep_train_image_df.set_index('image'), on='image')
test = test.join(incep_test_image_df.set_index('image'), on='image')
train_image_feat = pd.read_csv('../input/train_image_feature_new.csv')
test_image_feat = pd.read_csv('../input/test_image_feature_new.csv')
new_image_feat = list(set(train_image_feat.columns) - set(['image']))
new_image_feat = [f for f in new_image_feat]
train = train.merge(train_image_feat, on='image', how='left')
test = test.merge(test_image_feat, on='image', how='left')
data = | pd.concat([train, test], axis=0, sort=False) | pandas.concat |
#This auxiliary code prepares for each date and country the cases, a table of greylisted status, deaths and tests as publicly reported
#for a window starting before and ending after the date. This is used in evaluating the value of public data.
import numpy as np
import pandas as pd
history_start=20 #---how far into the past---
future_end=20 #----how-far into the future
#---Should testing data be raw or should the OWID smoothing be used?
raw=True;
#Downloading actual case and death data from owid and drop duplicates
url="https://covid.ourworldindata.org/data/owid-covid-data.csv"
df = pd.read_csv(url)
df=df.drop_duplicates()
#---Specifying correct date format
df['date']=pd.to_datetime(df['date'], format='%Y-%m-%d')
#Filtering for July 2020 onward. Estimates are valid after 08/02, and a window of +/- 20 days around each date is needed.
df=df[(df['date']>=pd.to_datetime('07/01/2020')) \
& (df['date']<pd.to_datetime('11/22/2020'))]
#----Keeping only relevant fields: cases, deaths, tests and dealing with
#----Countries that make corrections in reported data (reporting many times
#----for same date)
if raw:
df=df[['iso_code','date','location','new_cases_smoothed_per_million',\
'new_deaths_smoothed_per_million','new_tests_per_thousand']]
else:
df=df[['iso_code','date','location','new_cases_smoothed_per_million',\
'new_deaths_smoothed_per_million','new_tests_smoothed_per_thousand']]
df=df.sort_values(by=['iso_code','date'])
df=df.loc[df.groupby(['iso_code','date']).date.idxmax()]
#--Transforming three-letter ISO codes to two-letter ISO codes using conversion table
#--found in other data
url="../OtherData/short_to_iso.csv"
short_to_iso=pd.read_csv(url,error_bad_lines=False)
df=pd.merge(df,short_to_iso,how='left', left_on='iso_code', right_on='alpha-3')
#----Remove duplicates-----
df=df.drop_duplicates()
#-------Reading greylisting and prevalence data from OPE estimation outputs
estimates=\
| pd.read_csv('../OPE_Outputs/ope_dat_TRUE_Window_3_MinTest_30_SmoothPrior_TRUE_2001_0.9.csv') | pandas.read_csv |
import sys, os
sys.path.append('yolov3_detector')
from yolov3_custom_helper import yolo_detector
from darknet import Darknet
sys.path.append('pytorch-YOLOv4')
from tool.darknet2pytorch import Darknet as DarknetYolov4
import argparse
import cv2,time
import numpy as np
from tool.plateprocessing import find_coordinates, plate_to_string, padder, get_color
from tool.utils import alphanumeric_segemntor,plot_boxes_cv2
from tool.torch_utils import *
import time
from utility_codes.tsv_converter import ConverterTSV
use_cuda = True
#################### PLATE ####################
cfg_v4 = 'pytorch-YOLOv4/cfg/yolo-obj.cfg'
weight_v4 = 'weights/plate.weights'
m = DarknetYolov4(cfg_v4)
m.load_weights(weight_v4)
num_classes = m.num_classes
class_names = ['plate']
print('Loading weights from %s... Done!' % (weight_v4))
if use_cuda:
m.cuda()
# m_alpha.cuda()
# yolo_vehicle.cuda()
vehicle_save_filename = 'tsv_files/plate_tester.tsv'
vehicle_writer = ConverterTSV(vehicle_save_filename,file_type='vehicle')
image_dir = 'SIH_hackathon/Detection_Day3/Day3'
image_files = os.listdir(image_dir)
image_files.sort()
OUTPUT_SIZE = (1280, 720)
for img_name in image_files:
frame = cv2.imread(os.path.join(image_dir, img_name))
h, w = frame.shape[0:2]
sized = cv2.resize(frame, (m.width, m.height))
sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
confidence = 0.2
boxes = do_detect(m, sized, confidence , 0.6, use_cuda)
result_img, cls_conf_plate, coordinates_all, labels = plot_boxes_cv2(frame, boxes[0],classes_to_detect=class_names,fontScale=0.5,thick=2, savename=False, class_names=class_names)
cls_conf_plate = float(cls_conf_plate)
for i,co in enumerate(coordinates_all):
print(co)
data = [img_name, co, labels[i]]
vehicle_writer.put_vehicle(img_name, co, 'plate')
# vehicle_writer.put_vehicle(img_loc, c, 'plate')
cv2.imshow('Image', result_img)
if cv2.waitKey(1) & 0xff == ord('q'):
break
# cv2.waitKey(0)
cv2.destroyAllWindows()
import pandas as pd
def merge_and_save(fp1, fp2, outfile_path):
tsv_file1 = pd.read_csv(fp1, sep='\t', header=0)
tsv_file2 = | pd.read_csv(fp2, sep='\t', header=0) | pandas.read_csv |
import copy
import warnings
import catboost as cgb
import hyperopt
import lightgbm as lgb
import pandas as pd
import xgboost as xgb
from wax_toolbox import Timer
from churnchall.constants import MODEL_DIR, RESULT_DIR
from churnchall.datahandler import DataHandleCookie, to_gradboost_dataset
from churnchall.tuning import HyperParamsTuningMixin
warnings.filterwarnings(action="ignore", category=DeprecationWarning)
def compute_auc_lift(y_pred, y_true, target):
df_lift = pd.DataFrame({'pred': y_pred, 'true': y_true})
# Sort by prediction
if target == 1:
df_lift = df_lift.sort_values("pred", ascending=False)
elif target == 0:
df_lift = df_lift.sort_values("pred", ascending=True)
else:
raise ValueError
# compute lift score for each sample of population
nb_targets = float(df_lift[df_lift['true'] == target].shape[0])
df_lift["auclift"] = (df_lift["true"] == target).cumsum() / nb_targets
auc_lift = df_lift["auclift"].mean()
return auc_lift
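# Illustration of the lift metric on synthetic values (not project data). With target=1 the
# predictions are ranked descending, so a good ranking pushes the positives to the top and the
# mean cumulative lift is high; with target=0 the ranking is ascending and the metric measures
# how early the negatives are found instead.
#   _auc = compute_auc_lift(y_pred=[0.9, 0.8, 0.2, 0.1], y_true=[1, 1, 0, 0], target=1)
#   # _auc == 0.875 for this perfectly ranked toy example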
def lgb_auc_lift(y_pred, y_true, target=0):
y_true = y_true.label
auc_lift = compute_auc_lift(y_pred, y_true, target)
return "AUC Lift", auc_lift, True
def xgb_auc_lift(y_pred, y_true, target=0):
y_true = y_true.get_label()
auc_lift = compute_auc_lift(y_pred, y_true, target)
# return a pair metric_name, result. The metric name must not contain a colon (:) or a space
# since preds are margin(before logistic transformation, cutoff at 0)
return "AUC_Lift", auc_lift
def get_df_importance(booster):
if hasattr(booster, "feature_name"): # lightgbm
idx = booster.feature_name()
arr = booster.feature_importance()
df = pd.DataFrame(index=idx, data=arr, columns=["importance"])
elif hasattr(booster, "get_score"): # xgboost
serie = pd.Series(booster.get_score())
df = pd.DataFrame(columns=["importance"], data=serie)
elif hasattr(booster, "get_feature_importance"): # catboost
idx = booster.feature_names_
arr = booster.get_feature_importance()
df = pd.DataFrame(index=idx, data=arr, columns=["importance"])
else:
raise ValueError(type(booster))
# Traduce in percentage:
df["importance"] = df["importance"] / df["importance"].sum() * 100
df = df.sort_values("importance", ascending=False)
return df
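# Usage sketch (assumes a trained booster from one of the three supported libraries; the
# LightGBM path is shown, but an xgboost Booster or catboost model works the same way):
#   _booster = lgb.Booster(model_file="model.txt")   # path is illustrative
#   _imp = get_df_importance(_booster)               # importances normalized to percent
#   _imp.head(10)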
class BaseModelCookie(DataHandleCookie, HyperParamsTuningMixin):
# Attributes to be defined:
@property
    def algo(self):
raise NotImplementedError
@property
    def common_params(self):
raise NotImplementedError
@property
    def params_best_fit(self):
raise NotImplementedError
def save_model(self, booster):
now = pd.Timestamp.now(tz='CET').strftime("%d-%Hh-%Mm")
f = MODEL_DIR / "{}_model_{}.txt".format(self.algo, now)
booster.save_model(f.as_posix())
return f
@staticmethod
def _generate_plot(eval_hist):
try:
from plotlyink import register_iplot_accessor
register_iplot_accessor()
dfhist = pd.DataFrame(eval_hist)
fig = dfhist.iplot.scatter(as_figure=True)
import plotly
now = pd.Timestamp.now(tz='CET').strftime("%d-%Hh-%Mm")
filepath = RESULT_DIR / 'lgb_eval_hist_{}.html'.format(now)
plotly.offline.plot(fig, filename=filepath.as_posix())
except ImportError:
pass
# Methods to be implemented
def train():
raise NotImplementedError
def validate():
raise NotImplementedError
def cv():
raise NotImplementedError
class LgbCookie(BaseModelCookie):
algo = 'lightgbm'
# Common params for LightGBM
common_params = {
"verbose": -1,
"nthreads": 16,
# 'is_unbalance': 'true', #because training data is unbalance (replaced with scale_pos_weight)
"scale_pos_weight": 0.97, # used only in binary application, weight of labels with positive class
"objective": "xentropy", # better optimize on cross-entropy loss for auc
"metric": {"auc"}, # alias for roc_auc_score
}
# Best fit params
params_best_fit = {
"boosting_type": "gbdt", # algorithm to use
"learning_rate": 0.04,
"num_leaves": 10, # we should let it be smaller than 2^(max_depth)
# "min_data_in_leaf": 20, # Minimum number of data need in a child
"max_depth": -1, # -1 means no limit
"bagging_fraction": 0.9487944316907742, # Subsample ratio of the training instance.
"feature_fraction": 0.9763410806631222, # Subsample ratio of columns when constructing each tree.
"bagging_freq": 14, # frequence of subsample, <=0 means no enable
# "max_bin": 200,
'min_data_in_leaf': 14, # minimal number of data in one leaf
# 'min_child_weight': 5, # Minimum sum of instance weight(hessian) needed in a child(leaf)
# 'subsample_for_bin': 200000, # Number of samples for constructing bin
# 'min_split_gain': 0, # lambda_l1, lambda_l2 and min_gain_to_split to regularization
# 'reg_alpha': 0, # L1 regularization term on weights
# 'reg_lambda': 0, # L2 regularization term on weights
**common_params,
}
# tuning attributes in relation to HyperParamsTuningMixin
int_params = ("num_leaves", "max_depth", "min_data_in_leaf",
"bagging_freq")
float_params = ("learning_rate", "feature_fraction", "bagging_fraction")
hypertuning_space = {
"boosting": hyperopt.hp.choice("boosting", ["gbdt", "dart"]), # "rf",
"num_leaves": hyperopt.hp.quniform("num_leaves", 10, 60, 2),
"min_data_in_leaf": hyperopt.hp.quniform("min_data_in_leaf", 5, 20, 2),
# "learning_rate": hyperopt.hp.uniform("learning_rate", 0.001, 0.1),
"feature_fraction": hyperopt.hp.uniform("feature_fraction", 0.85,
0.99),
"bagging_fraction": hyperopt.hp.uniform("bagging_fraction", 0.85,
0.99),
"bagging_freq": hyperopt.hp.quniform("bagging_freq", 6, 18, 2),
}
def validate(self, save_model=True, **kwargs):
dtrain, dtest = self.get_train_valid_set(as_lgb_dataset=True)
valid_sets = [dtrain, dtest]
valid_names = ['train', 'test']
booster = lgb.train(
params=self.params_best_fit,
train_set=dtrain,
valid_sets=valid_sets,
valid_names=valid_names,
feval=lgb_auc_lift,
# adaptative learning rate :
# learning_rates=lambda iter: 0.5 * (0.999 ** iter),
**kwargs,
)
if save_model:
self.save_model(booster)
return booster
def cv(self,
params_model=None,
nfold=5,
num_boost_round=10000,
early_stopping_rounds=100,
generate_plot=False,
**kwargs):
dtrain = self.get_train_set(as_lgb_dataset=True)
# If no params_model is given, take self.params_best_fit
if params_model is None:
params_model = self.params_best_fit
eval_hist = lgb.cv(
params=params_model,
train_set=dtrain,
nfold=nfold,
verbose_eval=True, # display the progress
feval=lgb_auc_lift,
# display the standard deviation in progress, results are not affected
show_stdv=True,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
**kwargs,
)
if generate_plot:
self._generate_plot(eval_hist)
return eval_hist
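    # Usage sketch (constructor arguments come from DataHandleCookie and are omitted here):
    #   cookie = LgbCookie(...)
    #   booster = cookie.validate(num_boost_round=2000, early_stopping_rounds=100)
    #   eval_hist = cookie.cv(nfold=5, generate_plot=True)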
def predict(self, from_model_saved, label=None):
booster = lgb.Booster(model_file=from_model_saved)
df = self.get_test_set()
with Timer("Predicting"):
pred = booster.predict(df)
if not label:
now = pd.Timestamp.now(tz='CET').strftime("%d-%Hh-%Mm")
label = 'pred_{}_{}'.format(self.algo, now)
return | pd.DataFrame({label: pred}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np, pandas as pd
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy import units as u
from glob import glob
import os
def make_prioritycut_ctl(datadir='/Users/luke/local/TIC/CTL71/',
prioritycut=0.0015,
subcols = ['RA', 'DEC', 'TESSMAG', 'TEFF', 'PRIORITY',
'RADIUS', 'MASS', 'CONTRATIO', 'ECLONG',
'ECLAT', 'DIST', 'TICID', 'SPEC_LIST'],
savpath = '../data/TIC71_prioritycut.csv'):
'''
I downloaded the 2018/07/07 CTL direct from
http://astro.phy.vanderbilt.edu/~oelkerrj/tic7_ctl1_20182606.tar.gz.
It's only 2Gb, but regardless I put in on a storage drive.
From the docs at https://filtergraph.com/tess_ctl:
This portal was updated to reflect the CTL of TIC-7.1 on July 7, 2018.
This Candidate Target List (CTL-7.1) is a compilation of several
catalogs, including 2MASS, Gaia DR1, UCAC-4 & 5, Tycho-2, APASS DR9 and
others. The CTL is the current best effort to identify stars most
suitable for transit detection with TESS. Stars are considered for the
CTL if they are: 1) identified as RPMJ dwarfs with greater than 2-sigma
confidence; and 2) meet one of the following temperature/magnitude
criteria: (TESSmag < 12 and Teff >= 5500K) or (TESSmag < 13 and Teff <
5500K). Alternatively, a star is included in the CTL, regardless of the
conditions above, if the star is a member of the bright star list
(TESSmag < 6) or the specially curated cool dwarf, hot subdwarf, and
known planet lists. Users who are interested only in the top 200K or
400K stars may use a filter on the priority of 0.0017 and 0.0011
respectively. The full TIC & CTL will be available for download at
MAST. The full machine-readable version of this CTL filtergraph portal
is available as a comma-separated file at (above link).
Kwargs:
datadir, extracted should start looking like:
luke@brik:~/local/TIC/CTL71$ tree -L 1
.
├── 00-02.csv
├── 02-04.csv
├── 04-06.csv
├── 06-08.csv
├── 08-10.csv
├── 10-12.csv
├── 12-14.csv
├── 14-16.csv
├── 16-18.csv
├── 18-20.csv
├── 20-22.csv
├── 22-24.csv
└── header.txt
prioritycut: 0.0015 corresponds to top 300k or so.
subcols: to write out in prioritycut csv
'''
with open(datadir+'header.txt') as f:
hdr = f.readlines()[0]
columns = [l.strip('\n') for l in hdr.split(',')]
subcats = np.sort(glob(datadir+'??-??.csv'))
print('making priority cut catalog...')
for ix, subcat in enumerate(subcats):
print(ix)
if os.path.exists(datadir+'temp_{:d}.csv'.format(ix)):
continue
sc = pd.read_csv(subcat, names=columns)
sc = sc[subcols]
sc = sc[sc['PRIORITY']>prioritycut]
sc.to_csv(datadir+'temp_{:d}.csv'.format(ix), index=False)
temps = np.sort(glob(datadir+'temp_*.csv'))
for ix, temp in enumerate(temps):
if ix == 0:
df = pd.read_csv(temp)
else:
new = pd.read_csv(temp)
df = pd.concat([df, new])
    print('length of priority-cut TIC list is {:d}'.format(len(df)))
os.remove(temp)
df.to_csv(savpath, index=False)
print('saved {:s}'.format(savpath))
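# Hypothetical call using the defaults documented above (the paths must exist locally):
#   make_prioritycut_ctl(datadir='/Users/luke/local/TIC/CTL71/', prioritycut=0.0015,
#                        savpath='../data/TIC71_prioritycut.csv')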
def make_sublist_ctl(datadir='/Users/luke/local/TIC/CTL71/',
sublist=None):
'''
sublist: str in ["knownplanet" , ... ]
see make_prioritycut_ctl for verbose docstring
'''
with open(datadir+'header.txt') as f:
hdr = f.readlines()[0]
columns = [l.strip('\n') for l in hdr.split(',')]
subcols = ['RA', 'DEC', 'TESSMAG', 'TEFF', 'PRIORITY', 'RADIUS', 'MASS',
'CONTRATIO', 'ECLONG', 'ECLAT', 'DIST', 'TICID', 'SPEC_LIST']
subcats = np.sort(glob(datadir+'??-??.csv'))
print('making {:s}-cut catalog'.format(sublist))
for ix, subcat in enumerate(subcats):
print(ix)
if os.path.exists(datadir+'temp_{:d}.csv'.format(ix)):
continue
sc = | pd.read_csv(subcat, names=columns) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 15 17:07:38 2021
@author: perger
"""
# import packages
import pandas as pd
from datetime import timedelta, datetime
import pyam
import FRESH_clustering
from pathlib import Path
import glob
# Model name and version, scenario, region
model_name = 'FRESH:COM v2.0'
scenario_name = 'Default scenario'
region_name = 'Austria'
#filename_community = 'Input_data_community_IAMC.xlsx'
filename_grid = 'Input_data_grid_IAMC.csv'
filename_output = 'output_iamc.xlsx'
clustering = True
# Aggregation in the time domain: preparation
time_zone = '+01:00' # deviation from UTC (+01:00 is CET)
start_date = '2019-01-01 00:00' # YYYY-MM-DD HH:MM
number_days = 365
delta = timedelta(hours=1) # resolution ... hourly
time_steps = []
for t in range(24*number_days):
time_steps.append((datetime.fromisoformat(start_date+time_zone)+t*delta))
index_time = list(range(len(time_steps)))
# Read Input Data (from the IAMC Format)
# input data of prosuemr
#_p = Path('Input_data/Prosumer_data')
prosumer_files = {}
prosumer = []
for file in glob.glob("*.csv"):
i = Path(file).stem
if i.startswith('Prosumer'):
prosumer.append(i)
prosumer_files[i] = Path(file)
#file_community = pd.ExcelFile(filename_community)
#prosumer = file_community.sheet_names
# Electricity demand, PV generation, and other prosumer data
variable_load = 'Final Energy|Residential and Commercial|Electricity'
variable_PV = 'Secondary Energy|Electricity|Solar|PV'
SoC_max = 'Maximum Storage|Electricity|Energy Storage System'
SoC_min = 'Minimum Storage|Electricity|Energy Storage System'
q_bat_max = 'Maximum Charge|Electricity|Energy Storage System'
q_bat_min = 'Maximum Discharge|Electricity|Energy Storage System'
PV_capacity = 'Maximum Active power|Electricity|Solar'
w = 'Price|Carbon'
_a = [SoC_max, SoC_min, q_bat_max, q_bat_min, PV_capacity, w]
load = pd.DataFrame()
PV = pd.DataFrame()
prosumer_data = pd.DataFrame()
for i in prosumer:
# read excel sheet and convert to pyam.IamDataFrame
_df = pd.read_csv(prosumer_files[i], sep=';')
_df_pyam = pyam.IamDataFrame(_df)
# filter data (load)
_data = (_df_pyam
.filter(variable=variable_load)
.filter(region=region_name)
.filter(model=model_name)
.filter(scenario=scenario_name)
.filter(time=time_steps))
_b = _data.as_pandas().set_index('time')
load = pd.concat([load, _b['value'].reindex(time_steps)],
axis=1).rename(columns={'value':i})
# filter data (PV)
_data = (_df_pyam
.filter(variable=variable_PV)
.filter(region=region_name)
.filter(model=model_name)
.filter(scenario=scenario_name)
.filter(time=time_steps))
_b = _data.as_pandas().set_index('time')
PV = pd.concat([PV, _b['value'].reindex(time_steps)],
axis=1).rename(columns={'value':i})
# Prosumer data (other)
_data = (_df_pyam
.filter(variable=_a)
.filter(region=region_name)
.filter(model=model_name)
.filter(scenario=scenario_name))
_b = _data.as_pandas().set_index('variable')
prosumer_data = pd.concat([prosumer_data, _b['value'].reindex(_a)],
axis=1).rename(columns={'value':i})
# Prices
_df = | pd.read_csv(filename_grid, sep=';') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 31 13:31:13 2019
@author: mehrdad
"""
import pandas as pd
import numpy as np
import tslib.trip_detection
# Compute the difference between observed trips and computed trips ----------------------
# Any mode to any mode
def compute_observed_vs_computed_diffs(observed_, computed_):
M1 = pd.merge(observed_[['duration_in_min','distance','od_distance2','emission',
'walk_distance', 'bike_distance', 'active_distance']],
computed_[['duration_in_min','distance','od_distance2','emission',
'walk_distance', 'bike_distance', 'active_distance']],
left_index=True, right_index=True,
how='left',
suffixes=['_observed', '_alt'],
indicator=True)
    #TODO: whenever the user-trip-plan_id column values are needed as columns:
# ... M1.reset_index()
diff = pd.DataFrame()
#index is automatically created by this first insert!!!
# plan_id part of the index is that of the computed
# diff['user'] = M1.user
# diff['trip'] = M1.trip
# diff['plan_id'] = M1.plan_id
diff['has_any_computed'] = (M1._merge=='both')
diff['deltaT'] = M1.duration_in_min_observed - M1.duration_in_min_alt
diff['deltaE'] = M1.emission_observed - M1.emission_alt
diff['deltaD'] = M1.distance_observed - M1.distance_alt
diff['delta_walk_D'] = M1.walk_distance_observed - M1.walk_distance_alt
diff['delta_bike_D'] = M1.bike_distance_observed - M1.bike_distance_alt
diff['delta_AD'] = M1.active_distance_observed - M1.active_distance_alt
return diff
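# Expected call pattern (sketch): both dataframes are assumed to share a (user, trip, plan_id)
# style index, as implied by the left_index/right_index merge above.
#   deltas = compute_observed_vs_computed_diffs(observed_trips, computed_trips)
#   substitutes = compute_substitutes(observed_trips, computed_trips, deltas, with_ebike=False)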
# Choose the desired time-relevant low-carbon alternative --------------------------------
# Selection is made in comparison to the observed trip's attributes
def compute_substitutes(observed_, computed_, observed_vs_computed_deltas_, with_ebike):
alts = observed_vs_computed_deltas_[observed_vs_computed_deltas_.has_any_computed]
alts = alts.drop(columns='has_any_computed')
alts = pd.merge(alts, computed_[['mode']], left_index=True, right_index=True, how='inner')
alts.rename(columns={'mode':'mode_alt'}, inplace = True)
# Consider e-bike or not
if not with_ebike:
alts = alts[alts.mode_alt != 'EBICYCLE']
# Skip bike for bad weather months
alts = | pd.merge(alts, observed_[['month']], left_index=True, right_index=True, how='inner') | pandas.merge |
"""
Plaster-specific plots
These fall into two categories:
* Mature: plots that are ready to be used in notebook report templates
* Development: Plots that are still being worked on across various notebooks
Note:
* All plots are free-functions
* All plots should accept a run parameters and *optional* modifications to
the default plot.
* Naming conventions:
def text_* dump information as print statements
def plot_* are plots
def wizard_* are interactive wizard-type components that may have plot components
* bokeh imports should be deferred (they tend to slow down tests)
"""
import numpy as np
import pandas as pd
from plaster.run.lnfit.lnfit_result import LNFitResult
from plaster.tools.ipynb_helpers.displays import hd, md
from plaster.tools.schema import check
from plaster.tools.utils import utils
from plaster.tools.zplots.zplots import ZPlots
from plaster.tools.zlog.zlog import tell
import logging
log = logging.getLogger(__name__)
# Mature
# ====================================================================================================
# On some of our plots, e.g. peptide PR, if there are too many traces it causes the browser
# to really chug, even when just scrolling the page because something is getting updated/redrawn.
# Short of figuring out how to do this better, many fns avoid too many traces, so define a
# constant that can be used in multiple places.
MAX_BOKEH_PLOT_TRACES = 50
# Prep & sim related
# -----------------------
def text_prep_and_sim_info(run):
"""
State statistics about labelling and sim including
protein identifiability under the labelling scheme.
Audience:
Basic users
Goal:
Allow the user to understand the experimental setup
and the high-level overview of the label space.
"""
if hasattr(run, "sim_v1"):
n_train_non_zero_recall_peps = (run.sim_v1.train_pep_recalls > 0.0).sum()
n_train_zero_recall_peps = (run.sim_v1.train_pep_recalls == 0.0).sum()
n_train_peps = n_train_non_zero_recall_peps + n_train_zero_recall_peps
n_test_non_zero_recall_peps = (run.sim_v1.test_recalls > 0.0).sum()
n_test_zero_recall_peps = (run.sim_v1.test_recalls == 0.0).sum()
n_test_peps = n_test_non_zero_recall_peps + n_test_zero_recall_peps
print(
f"The preparation consisted of:\n"
f" Proteins: {len(run.prep.pros()) - 1}.\n"
f" Of which: {len(run.prep.pros__from_decoys())} are decoys generated by the {run.prep.params.decoy_mode} method.\n"
f" Labels: {run.sim_v1.params.to_label_str()}.\n"
f" Protease: {run.prep.params.protease} including {run.prep.params.include_misses} missed cleavages.\n"
f" Peptides: {len(run.prep.peps()) - 1}.\n"
f" Unique: {len(run.prep.pepstrs().seqstr.drop_duplicates()) - 1}\n"
f" From real sources: {len(run.prep.peps__no_decoys()) - 1}\n"
f" From decoy sources: {len(run.prep.peps__from_decoys())}\n"
f"Simulation was run with:\n"
f" n_pres: {run.sim_v1.params.n_pres}, n_mocks: {run.sim_v1.params.n_mocks}, n_edmans: {run.sim_v1.params.n_edmans}\n"
f" Train set:\n"
f" {n_train_non_zero_recall_peps} ({100 * n_train_non_zero_recall_peps / n_train_peps:2.0f}%) have positive recall (observable)\n"
f" {n_train_zero_recall_peps} ({100 * n_train_zero_recall_peps / n_train_peps:2.0f}%) had zero recall (unlabelable)\n"
f" Test set:\n"
f" {n_test_non_zero_recall_peps} ({100 * n_test_non_zero_recall_peps / n_test_peps:2.0f}%) have positive recall (observable)\n"
f" {n_test_zero_recall_peps} ({100 * n_test_zero_recall_peps / n_test_peps:2.0f}%) had zero recall (unlabelable)\n"
)
else:
# TODO: find sim_v2 appropriate replacements for sim_v1 params listed above
print(
f"The preparation consisted of:\n"
f" Proteins: {len(run.prep.pros()) - 1}.\n"
f" Of which: {len(run.prep.pros__from_decoys())} are decoys generated by the {run.prep.params.decoy_mode} method.\n"
f" Protease: {run.prep.params.protease} including {run.prep.params.include_misses} missed cleavages.\n"
f" Peptides: {len(run.prep.peps()) - 1}.\n"
f" Unique: {len(run.prep.pepstrs().seqstr.drop_duplicates()) - 1}\n"
f" From real sources: {len(run.prep.peps__no_decoys()) - 1}\n"
f" From decoy sources: {len(run.prep.peps__from_decoys())}\n"
f"Simulation was run with sim_v2.\n"
)
# TODO: if the run has a "protein of interest" then repeat a lot of the above for
# just the protein of interest -- e.g. how many peptides, unique, positive recall, etc.
if (
run.prep.n_pois > 0 and run.prep.n_pros > 2
): # ==2 means 1 protein and the 'null' entry
pro_ids = run.prep.pros__pois().pro_id
pep_iz = run.prep.peps__pois().pep_i.unique()
pepstrs = run.prep.pepstrs()
pepstrs = pepstrs[pepstrs.pep_i.isin(pep_iz)]
poi_string = f"{list(pro_ids)}" if len(pro_ids) < 10 else f"{len(pro_ids)}"
print(
f"\n\n"
f"Protein(s) of interest: {poi_string}\n"
f" Peptides: {len(pep_iz)}\n"
f" Unique: {len(pepstrs.seqstr.drop_duplicates())}\n"
f" TODO: more stats on just this protein(s)\n"
)
# Classification related
# -----------------------
def text_call_score_info(run, classifier=None):
"""
State statistics about classification.
Audience:
Advanced users
Goal:
Allow the user to see:
How many uniq FluoroSeqs, PeptideSeqs, and ProteinSeqs were called true and pred.
How many real vs decoys were called with counts
classifier: None to use any available preferred classifier, or one of the
supported classifiers in RunResult::test_call_bag(), e.g. 'rf', 'nn'
"""
bag = run.test_call_bag(classifier=classifier)
n_zeros = (bag.pred_pep_iz == 0).sum()
pred = bag.pred_peps__pros()
n_pred_real = (pred.pro_is_decoy < 1).sum()
n_pred_decoy = (pred.pro_is_decoy > 0).sum()
n_preds = len(pred)
print(
f"Test {bag.classifier_name.upper()} classification result has {bag.n_rows} rows.\n"
f" Of which: {n_zeros} were predicted to empty.\n"
f"True calls came from:\n"
f" {len(bag.true_peps__pros())} samples over {bag.true_peps__pros().groupby('pep_i').ngroups} total peptides of which {len(bag.true_peps__unique())} were unique\n"
f"Predicted calls went to:\n"
f" {len(bag.pred_peps__pros())} samples covering {len(bag.pred_peps__unique())} unique peptides\n"
f" {100.0 * n_pred_real / n_preds:2.0f}% reals and {100.0 * n_pred_decoy / n_preds:2.0f}% decoys\n"
)
def pep_iz_in_report(run, with_ptms=False):
if with_ptms:
return run.prep.peps__ptms(poi_only=True, ptms_to_rows=False).pep_i.unique()
return run.prep.peps__pois().pep_i.values
def plot_peptide_effective_labelability(run, **kwargs):
"""
'effective labelability' means how often does a peptide result in a dye-track signature that is
non-zero. A 'zero' signature might occur because the peptide detaches immediately, a dye
is a dud, a dye bleaches immediately (in a mock cycle), or, most typically, that the peptide
has no amino acids that can be labeled. The latter is 'unlabelable'. All the others are
'effectively unlabelable'
I am using the name 'labelability' because the next fn in this file plots observability vs
precision, and it is confusing to use the word observability for both plots. I have also
considered "scope_observability" vs "classifier_observability". The confusion arises when
you look at the plot produced by this function, which looks only at "train recalls", and see
that 100%, or nearly 100%, of peptides are "observable". And then in the next set of plots
which look at PR for peptides, you see that only 80% of the peptides are observable even at
precision=0. How can this be? It is because even if a peptide has labels and produces a
non-zero dye-track (so it is "scope observable" or "labelable"), it may NEVER get picked
by the classifier because some other peptide class receives a higher score. So in our
"post classification" notion of PR, even at precision 0, the peptide is not observable
by the classifier.
Y axis is the fraction 'scope' observable, or 'effectively labelable'
X axis is rank ordered peptides where peptide with highest “frac observable” is on the left.
Some of the peptides will have zero observability, we call those by the special name “unlabelable”
Up to three plots may be displayed:
1. All peptides the classifier will be trained on
2. Only peptides belonging to any "protein_of_interest"; see set_protein_of_interest()
3. Only peptides satisfying (2) and additionally containing PTM locations
Goal:
Allow user to see:
What portion/how well the peptides can be "seen" given the current labelling scheme.
"""
pep_iz_poi = pep_iz_in_report(run, with_ptms=False)
pep_iz_ptm = pep_iz_in_report(run, with_ptms=True)
pep_indices = [np.array(range(1, run.sim_v2.train_pep_recalls.shape[0]))]
if len(pep_iz_poi):
pep_indices.append(np.sort(pep_iz_poi))
if len(pep_iz_ptm):
pep_indices.append(np.sort(pep_iz_ptm))
z = ZPlots.zplot_singleton
with z(
_cols=len(pep_indices),
f_y_axis_label="per-class fraction labelable",
f_x_axis_label="peptide class rank",
f_y_range=(0, 1),
**kwargs,
):
for idx, domain in zip(pep_indices, ["", "POI ", "PTM "]):
train_pep_recalls = run.sim_v2.train_pep_recalls[idx]
train_pep_recalls_sorted = np.sort(train_pep_recalls)[::-1]
n_classes = len(train_pep_recalls_sorted)
n_observable = np.sum(train_pep_recalls_sorted.astype(bool))
z.cols(
train_pep_recalls_sorted,
f_title=f"{domain}{n_observable} of {n_classes} ({100.0 * n_observable / n_classes:,.1f}%) peptides labelable",
)
def plot_peptides_per_fluorosequence(run, **kwargs):
peps__flus = (
run.sim_v2.peps__flus(run.prep)
.drop_duplicates("flustr")
.sort_values("flu_count", ascending=False)
.reset_index()
)
labels = peps__flus.apply(lambda x: f"{x.flustr} ({x.flu_count})", axis=1)
z = kwargs.pop("_zplot_context", ZPlots())
# Note 1: below -- we are not showing the unlabeled peptides.
z.cols(
peps__flus.flu_count.values[1:],
_label=labels.values[1:],
_size_x=1000,
f_title="(Labelable) Peptides per fluorosequence",
f_x_axis_label="fluorsequence class rank",
f_y_axis_label="peptide count",
**kwargs,
)
def _plot_peptide_observability_vs_precision(
pr_df, pep_iz=None, as_fraction_observable=True, pr_axes=True, **kwargs
):
"""
given a precision-recall information for a set of peptides, plot
the fraction of those pep_i observable as a function of precision.
pr_df: a df that contains precision,recall values for each pep_i
pep_iz: the subset of pep_i to consider, or None or all pep_i
as_fraction_observable: when True show observability as fraction of total peptides
pr_axes: when True orient the plot as standard PR plots, with PEPTIDE
recall on the x-axis.
Y axis is number of classes or fraction of classes observable at precision X
X axis is precision
Goal:
Answer question:
How many of the peptide classes can I observe at precision X?
(or, equivalently)
At precision X, how many peptide classes have a recall > 0?
"""
pep_iz_with_pr = list(pr_df.pep_i.unique())
# no_pr = [pi for pi in pep_iz if pi not in pep_iz_with_pr ]
# print(no_pr)
assert pep_iz is None or all(
[pi in pep_iz_with_pr for pi in pep_iz]
), "pr_df must contain PR info for all pep_iz"
if pep_iz is None:
pep_iz = pep_iz_with_pr
pr_by_pep = pr_df[pr_df.pep_i.isin(pep_iz)]
n_peps_at_precision = []
precisions = np.linspace(0, 1, 51)
for prec in precisions:
peps_observable_at_prec = pr_by_pep[
(pr_by_pep.prec >= prec) & (pr_by_pep.recall > 0)
].pep_i.unique()
n_peps_at_precision += [len(peps_observable_at_prec)]
n_classes = len(pep_iz)
y_range = (0, n_classes)
if as_fraction_observable:
n_peps_at_precision = np.array(n_peps_at_precision) / n_classes
y_range = (0, 1.05)
x_range = (0, 1.05)
y_axis = (
"fraction of classes observable"
if as_fraction_observable
else "peptide classes observable"
)
x_axis = "precision"
y_values = n_peps_at_precision
x_values = precisions
if not pr_axes: # because this was the default for a long time
args = dict(
x=x_values,
y=y_values,
f_x_axis_label=x_axis,
f_y_axis_label=y_axis,
f_x_range=x_range,
f_y_range=y_range,
)
else:
args = dict(
y=x_values,
x=y_values,
f_y_axis_label=x_axis,
f_x_axis_label="peptide-classes recall",
f_y_range=x_range,
f_x_range=y_range,
)
z = ZPlots.zplot_singleton
z.line(**args, **kwargs, line_width=2)
def plot_peptide_observability_vs_precision(
run,
pep_iz=None,
pep_subset="all",
as_fraction_observable=True,
pr_axes=True,
classifier=None,
**kwargs,
):
"""
See docs at _plot_peptide_observability_vs_precision()
pep_iz: list of peptide indices that should be plotted, or None. May
be used together with pep_subset.
pep_subset: named subset controlling which portion of peptides the test_call_bag
that should be plotted. Valid subset names are all, poi, and ptm.
pr_style_axes: If True, orient the axes as a standard PR plot, with PEPTIDE
recall on the x-axis. Otherwise, interpret as
"peptide observability vs precision"
classifier: None to use any available preferred classifier, or one of the
supported classifiers in RunResult::test_call_bag(), e.g. 'rf', 'nn'
"""
bag = run.test_call_bag(classifier=classifier)
pr_by_pep_all = bag.pr_curve_by_pep()
domain = ""
if pep_subset == "all":
pep_iz_subset = list(pr_by_pep_all.pep_i.unique())
elif pep_subset == "poi":
pep_iz_subset = pep_iz_in_report(run, with_ptms=False)
domain = "POI "
elif pep_subset == "ptm":
pep_iz_subset = pep_iz_in_report(run, with_ptms=True)
domain = "PTM "
else:
raise ValueError("pep_subset must be all, poi, or ptm")
if pep_iz is not None and len(pep_iz) > 0:
pep_iz_subset = [pi for pi in pep_iz_subset if pi in pep_iz]
z = ZPlots.zplot_singleton
with z():
vs_text = (
"Peptide-Classes PR" if pr_axes else "Peptide Observability vs Precision"
)
title = f"{bag.classifier_name.upper()} {domain}{vs_text}, {len(pep_iz_subset)} classes"
_plot_peptide_observability_vs_precision(
pr_by_pep_all,
pep_iz_subset,
as_fraction_observable=as_fraction_observable,
pr_axes=pr_axes,
f_title=title,
**kwargs,
)
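# e.g. (sketch) PR-style observability for the proteins of interest with the RF classifier:
#   plot_peptide_observability_vs_precision(run, pep_subset="poi", pr_axes=True, classifier="rf")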
def plot_call_score_hist(run, classifier=None, **kwargs):
"""
Demonstrate an overview of classification scores for predictions to non-decoy versus decoys.
Audience:
Advanced users
Goal:
Allow the user to see:
if the decoy and real false rates have roughly equal distributions
if high scores are a good predictor of correctness.
Plan:
This is probably not a long-term useful plot as the imposter map
will probably communicate most of this information in a more useful way.
classifier: None to use any available preferred classifier, or one of the
supported classifiers in RunResult::test_call_bag(), e.g. 'rf', 'nn'
"""
bag = run.test_call_bag(classifier=classifier)
z = ZPlots.zplot_singleton
with z(
_bins=np.linspace(0, 1, 20),
_merge=True,
line_alpha=1,
f_plot_width=500,
f_plot_height=250,
f_x_range=(0, 1),
f_y_range=(0, 0.9),
f_x_axis_label="Score",
f_y_axis_label="Count",
f_title=f"Normalized distribution of call scores - {bag.classifier_name.upper()}",
_step=True,
_legend_click_policy="hide",
):
correct_mask = bag.true_pep_iz == bag.pred_pep_iz
real_mask = bag.pred_peps__pros().pro_is_decoy < 1
decoy_mask = bag.pred_peps__pros().pro_is_decoy > 0
mask = correct_mask & real_mask
if np.any(mask):
z.hist(
bag.scores[mask],
_normalizer=mask.sum(),
line_color=ZPlots.feature,
line_width=3,
legend_label="Correct calls predicted to an any-real source",
)
wrong_mask = ~correct_mask
mask = wrong_mask & real_mask
if np.any(mask):
z.hist(
bag.scores[mask],
_normalizer=mask.sum(),
line_color=ZPlots.compare1,
line_width=2,
line_dash=[2, 2],
legend_label="Wrong calls predicted to an any-real source",
)
if np.any(decoy_mask):
z.hist(
                bag.scores[decoy_mask],
_normalizer=decoy_mask.sum(),
line_color=ZPlots.compare2,
line_width=2,
line_dash=[5, 3],
legend_label="Any calls predicted to an any-decoy source",
)
def _plot_pr_curve(prsa, **kwargs):
"""
Plot one pr curve.
Note that the pr_curve might have a very large number of points
and this makes the plotting slow. The information at the top (high prec)
is usually more interesting so we sample from the set logarithmically.
Arguments:
prsa: a 4-tuple of precision, recall, min_score, AUC generated by AssignmentEval.pr_curve or similar.
TASK
I want this to have hover tools but I need to sort out hovers using zplots.
But I also need this to be zplot mergable so for now I'm taking out the hover.
"""
check.affirm(len(prsa) == 4)
check.list_or_tuple_t(prsa[0:2], np.ndarray)
n_rows = prsa[0].shape[0]
check.array_t(prsa[0], (n_rows,))
check.array_t(prsa[1], (n_rows,))
n_prs = len(prsa[0])
if n_prs <= 1:
log.error(f"Unable to show plot of pr_curve with n_prs of {n_prs}")
return
kwargs = utils.set_defaults(kwargs, line_width=2, line_alpha=1.0, _noise=0.000)
# Add noise to keep them from sitting on top of each other
# An _noise of about 0.005 is typically sufficient if any is needed
_noise = kwargs.pop("_noise")
x_noise = np.random.uniform(-_noise, +_noise, size=n_rows)
y_noise = np.random.uniform(-_noise, +_noise, size=n_rows)
z = ZPlots.zplot_singleton
z.line(
x=prsa[1] + x_noise,
y=prsa[0] + y_noise,
_range=(0, 1.05, 0, 1.05),
f_x_axis_label="read recall",
f_y_axis_label="precision",
f_title=kwargs.pop("f_title", "Precision-Recall"),
**kwargs,
)
def plot_pr_aggregate(run, pep_iz=None, classifier=None, **kwargs):
"""
Show P/R for all some set aggregate set of peptides
(ie. renders a single line for the set. See also: plot_pr_breakout)
Arguments:
pep_iz: If None computes over all peps, otherwise the subset
classifier: None to use any available preferred classifier, or one of the
supported classifiers in RunResult::test_call_bag(), e.g. 'rf', 'nn'
kwargs: Passed to the zplot
Audience:
Trained users.
P/R curves will not be familiar to many customers but it is an important concept to teach
as it is central to the way we treat evidence.
Goal:
Allow user to see:
The P/R curve for ALL peptides as an aggregate statistic
Plan:
There's a lot of way to look at this and it probably needs
to be subdivided into separate curves or made into an interactive widget.
In general, I'm going to avoid having lots of switches on this
and instead rely on with z.Opts(_merge=True) when the need to be overlaid.
"""
cb = run.test_call_bag(classifier=classifier)
prsa = cb.pr_curve_pep(pep_iz_subset=pep_iz)
utils.set_defaults(
kwargs, f_title=f"{cb.classifier_name.upper()} P/R over all peptides"
)
_plot_pr_curve(prsa, **kwargs)
def plot_pr_breakout(run, pep_iz=None, classifier=None, **kwargs):
"""
Render a separate P/R curve for each pep_iz.
See plot_pr_aggregate()
Arguments:
If pep_iz is None it renders ALL peptides. (Might be large!)
classifier: None to use any available preferred classifier, or one of the
supported classifiers in RunResult::test_call_bag(), e.g. 'rf', 'nn'
kwargs: passed to zplot
"""
master_bag = run.test_call_bag(classifier=classifier)
if pep_iz is None:
pep_iz = list(range(1, run.prep.n_peps))
utils.set_defaults(
kwargs, f_title=f"{master_bag.classifier_name.upper()} P/R over all peptides"
)
z = ZPlots.zplot_singleton
user_color = kwargs.get("color", None)
with z(
_merge=True,
_range=(0, 1.05, 0, 1.05),
f_x_axis_label="read recall",
f_y_axis_label="precision",
**kwargs,
):
pr_df = master_bag.pr_curve_by_pep(pep_iz=pep_iz)
peps_flus_df = master_bag.peps__pepstrs__flustrs__p2()
for pep_i, g in pr_df.groupby("pep_i"):
prsa = (g.prec.values, g.recall.values, g.score.values, [])
# for pep_i in pep_iz:
# prsa = master_bag.pr_curve_by_pep(pep_iz_subset=[pep_i])
row = peps_flus_df[peps_flus_df.pep_i == pep_i].iloc[0]
legend_label = f"pep{pep_i:03d}"
line_label = f"{row.pro_id} pep{pep_i:03d} {row.seqstr} {row.flustr}"
_plot_pr_curve(
prsa,
line_color=user_color or z.next(),
_label=line_label,
legend_label=legend_label,
**kwargs,
)
# this will only get plotted if all class scores are available:
prsa = master_bag.pr_curve_sklearn(pep_i)
if prsa[0] is not None:
_plot_pr_curve(prsa, line_width=1, **kwargs)
def plot_pr_for_run(
run, aggregate_only=False, force_all_proteins=False, classifier=None, **kwargs
):
"""
Display one or more PR plots depending on the config of run.
Plot aggregate and breakout PRs for protein(s) of interest if any are set,
otherwise plot aggregate of all proteins/peptides.
Note that individual PR curves are not plotted if the number of peptides exceeds
some value (currently 50) because of some slowdown in notebooks when there are
lots of individual curves. This slowdown needs to be investigated - it should be
able to do hundreds without any issue. Right?
aggregate_only: Do not plot PR curves for any individual peptides, whether the domain
is all proteins, or proteins of interest, or proteins with PTMs.
force_all_proteins: Plot PR for *all* proteins (always an aggregate PR) even if there
are proteins-of-interest specified for this run.
classifier: None to use any available preferred classifier, or one of the
supported classifiers in RunResult::test_call_bag(), e.g. 'rf', 'nn'
kwargs: passed to zplot
NOTE: in my experience, aggregate PR curves are not very informative.
"""
z = ZPlots.zplot_singleton
zOpts = (
{}
if (
aggregate_only
or run.prep.n_pois == 0
or run.prep.n_pois > MAX_BOKEH_PLOT_TRACES
)
else {"_cols": 3}
)
with z(**zOpts):
z.color_reset()
# *Up to* three columns, if we have interesting domains to look at.
if run.prep.n_pois > 0 and not force_all_proteins:
pep_iz = run.prep.peps__pois().pep_i.unique()
n_peps = len(pep_iz)
if n_peps > 1 or aggregate_only:
plot_pr_aggregate(
run,
pep_iz=pep_iz,
classifier=classifier,
f_title=f"{classifier.upper()} Aggregate PR ({n_peps} peptides of interest)",
**kwargs,
)
# maybe do breakout of individual peps of interest
if not aggregate_only:
if n_peps < MAX_BOKEH_PLOT_TRACES: # See comment at top.
plot_pr_breakout(
run,
pep_iz=pep_iz,
classifier=classifier,
f_title=f"{classifier.upper()} Individual PR ({n_peps} peptides of interest)",
**kwargs,
)
else:
tell(
f"Individual peptide PR for protein-of-interest skipped because plot with {n_peps} peptides may be slow."
)
# maybe do breakout of peps of interest that have PTM locations
df = run.prep.peps__ptms(poi_only=True, ptms_to_rows=False)
pep_w_ptm_iz = df.pep_i.values
n_peps_w_ptm = len(pep_w_ptm_iz)
if n_peps_w_ptm > 0 and not aggregate_only and n_peps_w_ptm != n_peps:
if n_peps_w_ptm < MAX_BOKEH_PLOT_TRACES: # See comment at top.
plot_pr_breakout(
run,
pep_iz=pep_w_ptm_iz,
classifier=classifier,
f_title=f"{classifier.upper()} Individual PR ({n_peps_w_ptm} peptides w PTMs)",
**kwargs,
)
else:
tell(
f"Individual PTM-peptide PR skipped because plot with {n_peps_w_ptm} peptides may be slow."
)
else:
# All proteins, less the "null" entry at 0
n_peps = run.prep.n_peps - 1
n_pros = run.prep.n_pros - 1
plot_pr_aggregate(
run,
classifier=classifier,
f_title=f"{classifier.upper()} Aggregate PR ({n_pros} proteins, {n_peps} peptides)",
**kwargs,
)
def plot_pr_for_job(job, force_all_proteins=False, classifier="nn_v2", **kwargs):
"""
Single plot containing relevant PR per run, with legend to indicate run.
force_all_proteins: Plot PR for *all* proteins (aggregate) even if there
are proteins-of-interest specified for this run.
classifier: None to use any available preferred classifier, or one of the
supported classifiers in RunResult::test_call_bag(), e.g. 'rf', 'nn'
kwargs: passed to zplot
"""
domain = f"all proteins, observable peptides per run"
run = job.runs[0] # get proteins_of_interest, same for each run
if run.prep.n_pois > 0 and not force_all_proteins:
domain = (
f"{run.prep.n_pois} protein(s) of interest, observable peptides per run"
)
from bokeh.palettes import Category20
palette = Category20[20]
z = ZPlots()
with z(
_merge=True,
f_title=f"PR all runs, {domain}",
f_x_axis_label="read recall",
f_y_axis_label="precision",
):
for i, run in enumerate(job.runs):
color = palette[i % 20]
name = "_".join(run.run_name.split("_")[:-1])
plot_pr_for_run(
run,
aggregate_only=True,
force_all_proteins=force_all_proteins,
classifier=classifier,
color=color,
legend_label=name,
**kwargs,
)
def standard_run_report(run, display_run_title=True, classifier=None):
if display_run_title:
hd("h2", f"Run: {run.run_name}")
# Information from Prep and Simulation that has nothing
# to do with classification
hd("h3", f"Prep and simulation")
text_prep_and_sim_info(run)
if hasattr(run, "sim_v2"):
plot_peptide_effective_labelability(run)
plot_peptides_per_fluorosequence(run)
# If no classifier is specified, print information from all available
# classifiers, otherwise just the one that was requested.
hd("h3", f"Classification")
classifiers = run.get_available_classifiers()
if classifier in classifiers:
classifiers = [classifier]
# Do textual information on classifiers one after the other.
for classifier in classifiers:
text_call_score_info(run, classifier=classifier)
z = ZPlots()
# Do call/score histograms next to each other
with z(_cols=len(classifiers)):
for classifier in classifiers:
plot_call_score_hist(run, classifier=classifier)
# Do PR plots for each classifier grouped together. If there are proteins of
# interest, then each call will already result in multiple plots, but if there are
# not, each call will only be one plot, so in that case group them on the same
    # row to make comparison between classifiers easier.
zOpts = {}
if run.prep.n_pois == 0 or run.prep.n_pois > MAX_BOKEH_PLOT_TRACES:
zOpts = {"_cols": 2}
with z(**zOpts):
for classifier in classifiers:
plot_peptide_observability_vs_precision(run, classifier=classifier)
plot_pr_for_run(run, _noise=0.01, classifier=classifier)
# In development
# ====================================================================================================
def plot_channel_signal_scatter(run, ch0=0, ch1=1, **kwargs):
sample_size = kwargs.get("sample_size", 500)
n_std_plot_range = kwargs.get("n_std_plot_range", 4)
sigproc = run.sigproc_v1 if run.has_result("sigproc_v1") else run.sigproc_v2
r = sigproc.radmats()
n_peaks = len(r.peak_i.unique())
title = (
f"ch{ch1} vs ch{ch0} signal, n={sample_size}, axis-range={n_std_plot_range}-STD"
)
md(f"## {title}")
sample = np.random.choice(range(n_peaks), replace=False, size=sample_size)
max_x = max_y = 0
z = ZPlots()
with z(_cols=4, f_plot_width=250, f_plot_height=280):
for cy in range(sigproc.n_cycles):
x = r[(r.cycle_i == cy) & (r.channel_i == ch0)].signal.values
y = r[(r.cycle_i == cy) & (r.channel_i == ch1)].signal.values
x_sample = x[sample]
y_sample = y[sample]
if max_x == 0:
max_x = np.mean(x_sample) + np.std(x_sample) * n_std_plot_range
max_y = np.mean(y_sample) + np.std(y_sample) * n_std_plot_range
z.scat(
x=x_sample,
y=y_sample,
f_x_axis_label=f"signal, channel {ch0}",
f_y_axis_label=f"signal, channel {ch1}",
f_title=f"cycle {cy}",
f_x_range=(
1
if kwargs.get("f_x_axis_type") == "log"
else 0, # Bokeh has issues with zeros in log scale plots https://github.com/bokeh/bokeh/issues/6536
max_x,
),
f_y_range=(0, max_y),
**kwargs,
)
def text_lnfit_links(run):
from plumbum import local
names = LNFitResult.task_names(run)
tell(f"lnfit tasks in run: {names}")
# We can use any name to get an lnfit result object
# which deals with multiple lnfits per run for us.
lnfit = run[names[0]]
cwd = local.cwd
for ch, html_files in enumerate(lnfit.html_files()):
if not html_files:
# TODO: should we print some kind of warning here? Due to the structure of the data returned by LNFitResult.html_files,
# if html_files is empty then there's not a whole lot of other information to relay to the user.
continue
task_folder = html_files[0].split()[-2]
md(f"### {task_folder}")
m = ""
for f in sorted(html_files):
relative_path = f.relative_to(cwd)
m += f"* [{f.name}]({relative_path})\n"
md(m)
def plot_signal_for_lnfit_sequence(run, channel, sequence, lnfit_taskname=None):
# Used in lnfit_template
"""
Plot signal vs cycle for all peaks with a given lnfit sequence
on a particular channel.
sequence is e.g. '22221111000000'
"""
names = LNFitResult.task_names(run)
lnfit = run[names[0]]
df_ln = lnfit.lnfit_bestseq_df(
lnfit_taskname
) # contains lnfit bestseq per peak_i and channel;
sigproc = run.sigproc_v1 if run.has_result("sigproc_v1") else run.sigproc_v2
df = sigproc.fields__n_peaks__peaks__radmat()
df = | pd.merge(df_ln, df, how="left", on=["peak_i", "channel_i"]) | pandas.merge |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by <NAME>
import unittest
import pandas as pd
import pandas.testing as pdtest
from allfreqs import AlleleFreqs
from allfreqs.classes import Reference, MultiAlignment
from allfreqs.tests.constants import (
REAL_ALG_X_FASTA, REAL_ALG_X_NOREF_FASTA, REAL_RSRS_FASTA,
REAL_ALG_L6_FASTA, REAL_ALG_L6_NOREF_FASTA,
SAMPLE_MULTIALG_FASTA, SAMPLE_MULTIALG_NOREF_FASTA, SAMPLE_REF_FASTA,
SAMPLE_MULTIALG_CSV, SAMPLE_MULTIALG_NOREF_CSV, SAMPLE_REF_CSV,
sample_sequences_df, SAMPLE_SEQUENCES_DICT, sample_sequences_freqs,
sample_sequences_freqs_amb, SAMPLE_FREQUENCIES,
SAMPLE_FREQUENCIES_AMB, REAL_ALG_X_DF, REAL_X_FREQUENCIES, REAL_ALG_L6_DF,
REAL_L6_FREQUENCIES, TEST_CSV
)
class TestBasic(unittest.TestCase):
def setUp(self) -> None:
ref = Reference("AAG-CTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGG-TAT")
alg = MultiAlignment(SAMPLE_SEQUENCES_DICT)
self.af = AlleleFreqs(multialg=alg, reference=ref)
self.af_amb = AlleleFreqs(multialg=alg, reference=ref, ambiguous=True)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_frequencies_ambiguous(self):
# Given/When
exp_freqs = sample_sequences_freqs_amb()
# Then
pdtest.assert_frame_equal(self.af_amb.frequencies, exp_freqs)
def test__get_frequencies(self):
# Given
test_freq = pd.Series({'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3,
'-': 0.1, 'N': 0.1})
exp_freq = {'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3, 'gap': 0.1,
'oth': 0.1}
# When
result = self.af._get_frequencies(test_freq)
# Then
        self.assertTrue(self._dict_almost_equal(result, exp_freq))
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
def test_to_csv_ambiguous(self):
# Given/When
self.af_amb.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES_AMB)
# Then
pdtest.assert_frame_equal(result, expected)
@staticmethod
def _dict_almost_equal(expected: dict, result: dict, acc=10**-8) -> bool:
"""Compare to dictionaries and ensure that all their values are the
same, accounting for some fluctuation up to the given accuracy value.
Args:
expected: expected dictionary
result: resulting dictionary
acc: accuracy to use [default: 10**-8]
"""
if expected.keys() == result.keys():
for key in expected.keys():
if abs(expected[key] - result[key]) < acc:
continue
return True
return False
# From Fasta
class TestFromFasta(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=SAMPLE_MULTIALG_FASTA)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestFromFastaNoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=SAMPLE_MULTIALG_NOREF_FASTA,
reference=SAMPLE_REF_FASTA)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
# From Csv
class TestFromCsv(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_csv(sequences=SAMPLE_MULTIALG_CSV)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestFromCsvNoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_csv(sequences=SAMPLE_MULTIALG_NOREF_CSV,
reference=SAMPLE_REF_CSV)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
# Real Datasets
class TestRealDatasetsX(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=REAL_ALG_X_FASTA)
def test_df(self):
# Given/When
exp_df = pd.read_csv(REAL_ALG_X_DF, index_col=0)
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = pd.read_csv(REAL_X_FREQUENCIES)
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/when
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(REAL_X_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestRealDatasetsXNoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=REAL_ALG_X_NOREF_FASTA,
reference=REAL_RSRS_FASTA)
def test_df(self):
# Given/When
exp_df = pd.read_csv(REAL_ALG_X_DF, index_col=0)
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = pd.read_csv(REAL_X_FREQUENCIES)
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(REAL_X_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestRealDatasetsL6(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=REAL_ALG_L6_FASTA)
def test_df(self):
# Given/When
exp_df = pd.read_csv(REAL_ALG_L6_DF, index_col=0)
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = pd.read_csv(REAL_L6_FREQUENCIES)
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(REAL_L6_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestRealDatasetsL6NoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=REAL_ALG_L6_NOREF_FASTA,
reference=REAL_RSRS_FASTA)
def test_df(self):
# Given/When
exp_df = pd.read_csv(REAL_ALG_L6_DF, index_col=0)
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = pd.read_csv(REAL_L6_FREQUENCIES)
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(REAL_L6_FREQUENCIES)
# Then
| pdtest.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
import requests
import urllib.request
import json
import pandas as pd
import csv
# Run in the cloud on Colab
# Save files
# from pydrive.auth import GoogleAuth
# from pydrive.drive import GoogleDrive
# from google.colab import auth
# from oauth2client.client import GoogleCredentials
# Crawl the URLs of all search result pages
def url_load(url):
print(url)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
req = urllib.request.Request(url,None,headers = headers)
#print(req)
html = urllib.request.urlopen(req).read()
soup = BeautifulSoup(html, features='lxml')
centent = soup.find('div', {"class": "category-products"})
b = centent.find_all('li')
url_all = []
name_all = []
size_all = []
prize_all = []
    # Crawl the detail-page URLs from each search page
for a in b:
all_href = a.find_all('a',{"class":"product-image"})
#print(all_href)
        # Visit each detail page and crawl its parameters
for l in all_href:
#print(l['href'])
#list[] = l['href']
e = l['href']
u = printurl(e)
url_all.append(u)
#print(u)
n = printname(e)
name_all.append(n)
s = printsize(e)
size_all.append(s)
p = printprize(e)
prize_all.append(p)
#print(au)
data = {
'url' : url_all,
'name' : name_all,
'size' : size_all,
'prize' : prize_all
}
#print(data)
#data_csv = pd.DataFrame(data)
return data
#data_csv.to_csv("data.csv",index=False,sep=',')
#print(u)
# Crawl the detail page URL
def printurl(url):
print(url)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
req = urllib.request.Request(url,None,headers = headers)
#print(req)
html = urllib.request.urlopen(req).read()
soup = BeautifulSoup(html, features='lxml')
url_single = []
#for i in range(1,24):
url_single.append(url)
#print(url_single)
return url_single
# Crawl the product name from the detail page
def printname(url):
#print(url)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
req = urllib.request.Request(url,None,headers = headers)
#print(req)
html = urllib.request.urlopen(req).read()
soup = BeautifulSoup(html, features='lxml')
#<h1 class="product-name" itemprop="name">
h = soup.find('h1',{"class":"product-name"})
#print(h.get_text())
name_single = []
#for i in range(1,24):
name_single.append(h.get_text())
return name_single
# Crawl the product shoe sizes from the detail page
def printsize(url):
#print(url)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
req = urllib.request.Request(url,None,headers = headers)
#print(req)
html = urllib.request.urlopen(req).read()
soup = BeautifulSoup(html, features='lxml')
#<span class="product-sizes__size">7*</span>
s = soup.find_all('span',{"class":"product-sizes__size"})
size_single = []
for size in s:
#print(size.get_text())
size_single.append(size.get_text())
return size_single
# Crawl the product prices from the detail page
def printprize(url):
#print(url)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
req = urllib.request.Request(url,None,headers = headers)
#print(req)
html = urllib.request.urlopen(req).read()
soup = BeautifulSoup(html, features='lxml')
#<span class="product-sizes__price" data-selected-price>CN¥5,296.00</span>
p = soup.find_all('span',{"class":"product-sizes__price"})
#print(p.get_text())
prize_single = []
for prize in p:
#print(prize.get_text())
prize_single.append(prize.get_text())
return prize_single
#def write_csv(content):
if __name__ == '__main__':
    # Use Air Jordan 1 as an example
url = 'https://www.stadiumgoods.com/air-jordan/air-jordan-1'
    # Start crawling
x = url_load(url)
    # Save the results per page
data_csv = | pd.DataFrame(x) | pandas.DataFrame |
import inspect
import json
import os
import re
from urllib.parse import quote
from urllib.request import urlopen
import pandas as pd
import param
from .configuration import DEFAULTS
class TutorialData(param.Parameterized):
label = param.String(allow_None=True)
raw = param.Boolean()
verbose = param.Boolean()
return_meta = param.Boolean()
use_cache = param.Boolean()
_source = None
_base_url = None
_data_url = None
_description = None
def __init__(self, **kwds):
super().__init__(**kwds)
self._cache_dir = DEFAULTS["cache_kwds"]["directory"]
self._remove_href = re.compile(r"<(a|/a).*?>")
os.makedirs(self._cache_dir, exist_ok=True)
self._init_owid()
@property
def _cache_path(self):
cache_file = f"{self.label}.pkl"
return os.path.join(self._cache_dir, cache_file)
@property
def _dataset_options(self):
options = set([])
for method in dir(self):
if method.startswith("_load_") and "owid" not in method:
options.add(method.replace("_load_", ""))
return list(options) + list(self._owid_labels_df.columns)
@staticmethod
def _specify_cache(cache_path, **kwds):
if kwds:
cache_ext = "_".join(
f"{key}={val}".replace(os.sep, "") for key, val in kwds.items()
)
cache_path = f"{os.path.splitext(cache_path)[0]}_{cache_ext}.pkl"
return cache_path
def _cache_dataset(self, df, cache_path=None, **kwds):
if cache_path is None:
cache_path = self._cache_path
cache_path = self._specify_cache(cache_path, **kwds)
df.to_pickle(cache_path)
def _read_cache(self, cache_path=None, **kwds):
if not self.use_cache:
return None
if cache_path is None:
cache_path = self._cache_path
cache_path = self._specify_cache(cache_path, **kwds)
try:
return pd.read_pickle(cache_path)
except Exception:
if os.path.exists(cache_path):
os.remove(cache_path)
return None
@staticmethod
def _snake_urlify(s):
# Replace all hyphens with underscore
s = s.replace(" - ", "_").replace("-", "_")
# Remove all non-word characters (everything except numbers and letters)
s = re.sub(r"[^\w\s]", "", s)
# Replace all runs of whitespace with a underscore
s = re.sub(r"\s+", "_", s)
return s.lower()
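    # Note: _init_owid below builds a small lookup table mapping each OWID dataset
    # label to its raw "data" (csv) and "meta" (json) URLs taken from the
    # owid-datasets GitHub tree, and caches that table locally as a pickle so the
    # GitHub API call is only made once.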
def _init_owid(self):
cache_path = os.path.join(self._cache_dir, "owid_labels.pkl")
self._owid_labels_df = self._read_cache(cache_path=cache_path)
if self._owid_labels_df is not None:
return
owid_api_url = (
"https://api.github.com/"
"repos/owid/owid-datasets/"
"git/trees/master?recursive=1"
)
with urlopen(owid_api_url) as f:
sources = json.loads(f.read().decode("utf-8"))
owid_labels = {}
owid_raw_url = "https://raw.githubusercontent.com/owid/owid-datasets/master/"
for source_tree in sources["tree"]:
path = source_tree["path"]
if ".csv" not in path and ".json" not in path:
continue
label = "owid_" + self._snake_urlify(path.split("/")[-2].strip())
if label not in owid_labels:
owid_labels[label] = {}
url = f"{owid_raw_url}/{quote(path)}"
if ".csv" in path:
owid_labels[label]["data"] = url
elif ".json" in path:
owid_labels[label]["meta"] = url
self._owid_labels_df = pd.DataFrame(owid_labels)
self._cache_dataset(self._owid_labels_df, cache_path=cache_path)
def _load_owid(self, **kwds):
self._data_url = self._owid_labels_df[self.label]["data"]
meta_url = self._owid_labels_df[self.label]["meta"]
with urlopen(meta_url) as response:
meta = json.loads(response.read().decode())
self.label = meta["title"]
self._source = (
" & ".join(source["dataPublishedBy"] for source in meta["sources"])
+ " curated by Our World in Data (OWID)"
)
self._base_url = (
" & ".join(source["link"] for source in meta["sources"])
+ " through https://github.com/owid/owid-datasets"
)
self._description = re.sub(self._remove_href, "", meta["description"])
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df.columns = [self._snake_urlify(col) for col in df.columns]
return df
def _load_annual_co2(self, **kwds):
self._source = "NOAA ESRL"
self._base_url = "https://www.esrl.noaa.gov/"
self._data_url = (
"https://www.esrl.noaa.gov/"
"gmd/webdata/ccgg/trends/co2/co2_annmean_mlo.txt"
)
self._description = (
"The carbon dioxide data on Mauna Loa constitute the longest record "
"of direct measurements of CO2 in the atmosphere. They were started "
"by <NAME> of the Scripps Institution of Oceanography in "
"March of 1958 at a facility of the National Oceanic and Atmospheric "
"Administration [Keeling, 1976]. NOAA started its own CO2 measurements "
"in May of 1974, and they have run in parallel with those made by "
"Scripps since then [Thoning, 1989]."
)
df = self._read_cache(**kwds)
if df is None:
base_kwds = dict(
header=None,
comment="#",
sep="\s+", # noqa
names=["year", "co2_ppm", "uncertainty"],
)
base_kwds.update(kwds)
df = pd.read_csv(self._data_url, **base_kwds)
self._cache_dataset(df, **kwds)
return df
def _load_tc_tracks(self, **kwds):
self._source = "IBTrACS v04 - USA"
self._base_url = "https://www.ncdc.noaa.gov/ibtracs/"
self._data_url = (
"https://www.ncei.noaa.gov/data/"
"international-best-track-archive-for-climate-stewardship-ibtracs/"
"v04r00/access/csv/ibtracs.last3years.list.v04r00.csv"
)
self._description = (
"The intent of the IBTrACS project is to overcome data availability "
"issues. This was achieved by working directly with all the Regional "
"Specialized Meteorological Centers and other international centers "
"and individuals to create a global best track dataset, merging storm "
"information from multiple centers into one product and archiving "
"the data for public use."
)
df = self._read_cache(**kwds)
if df is None:
base_kwds = dict(keep_default_na=False)
base_kwds.update(kwds)
df = pd.read_csv(self._data_url, **base_kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
cols = [
"BASIN",
"NAME",
"LAT",
"LON",
"ISO_TIME",
"USA_WIND",
"USA_PRES",
"USA_SSHS",
"USA_RMW",
]
df = df[cols]
df.columns = df.columns.str.lower()
df = df.iloc[1:]
df = df.set_index("iso_time")
df.index = pd.to_datetime(df.index)
numeric_cols = ["lat", "lon", "usa_rmw", "usa_pres", "usa_sshs", "usa_rmw"]
df[numeric_cols] = df[numeric_cols].apply(pd.to_numeric, errors="coerce")
return df
def _load_covid19_us_cases(self, **kwds):
self._source = "JHU CSSE COVID-19"
self._base_url = "https://github.com/CSSEGISandData/COVID-19"
self._data_url = (
"https://github.com/CSSEGISandData/COVID-19/raw/master/"
"csse_covid_19_data/csse_covid_19_time_series/"
"time_series_covid19_confirmed_US.csv"
)
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df = df.drop(
["UID", "iso2", "iso3", "code3", "FIPS", "Admin2", "Country_Region"],
axis=1,
)
df.columns = df.columns.str.lower().str.rstrip("_")
df = df.melt(
id_vars=["lat", "long", "combined_key", "province_state"],
var_name="date",
value_name="cases",
)
df["date"] = pd.to_datetime(df["date"])
return df
def _load_covid19_global_cases(self, **kwds):
self._source = "JHU CSSE COVID-19"
self._base_url = "https://github.com/CSSEGISandData/COVID-19"
self._data_url = (
"https://github.com/CSSEGISandData/COVID-19/raw/master/"
"csse_covid_19_data/csse_covid_19_time_series/"
"time_series_covid19_confirmed_global.csv"
)
self._description = (
"This is the data repository for the 2019 Novel Coronavirus "
"Visual Dashboard operated by the Johns Hopkins University Center "
"for Systems Science and Engineering (JHU CSSE). Also, Supported "
"by ESRI Living Atlas Team and the Johns Hopkins University "
"Applied Physics Lab (JHU APL)."
)
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df.columns = df.columns.str.lower().str.rstrip("_")
df = df.melt(
id_vars=["province/state", "country/region", "lat", "long"],
var_name="date",
value_name="cases",
)
df.columns = df.columns.str.replace("/", "_")
df["date"] = pd.to_datetime(df["date"])
return df
def _load_covid19_population(self, **kwds):
self._source = "JHU CSSE COVID-19"
self._base_url = "https://github.com/CSSEGISandData/COVID-19"
self._data_url = (
"https://raw.githubusercontent.com/"
"CSSEGISandData/COVID-19/master/"
"csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv"
)
self._description = (
"This is the data repository for the 2019 Novel Coronavirus "
"Visual Dashboard operated by the Johns Hopkins University Center "
"for Systems Science and Engineering (JHU CSSE). Also, Supported "
"by ESRI Living Atlas Team and the Johns Hopkins University "
"Applied Physics Lab (JHU APL)."
)
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df.columns = df.columns.str.lower().str.rstrip("_")
return df
def _load_gapminder_life_expectancy(self, **kwds):
self._source = "World Bank Gapminder"
self._base_url = (
"https://github.com/open-numbers/ddf--gapminder--systema_globalis"
)
self._data_url = (
"https://raw.githubusercontent.com/open-numbers/"
"ddf--gapminder--systema_globalis/master/"
"countries-etc-datapoints/ddf--datapoints--"
"life_expectancy_years--by--geo--time.csv"
)
self._description = (
"This is the main dataset used in tools on the official Gapminder "
"website. It contains local & global statistics combined from "
"hundreds of sources."
)
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df = df.rename(columns={"life_expectancy_years": "life_expectancy"})
return df
def _load_gapminder_income(self, **kwds):
self._source = "World Bank Gapminder"
self._base_url = (
"https://github.com/open-numbers/ddf--gapminder--systema_globalis"
)
self._data_url = (
"https://raw.githubusercontent.com/open-numbers/"
"ddf--gapminder--systema_globalis/master/"
"countries-etc-datapoints/ddf--datapoints--"
"income_per_person_gdppercapita_ppp_inflation_adjusted"
"--by--geo--time.csv"
)
self._description = (
"This is the main dataset used in tools on the official Gapminder "
"website. It contains local & global statistics combined from "
"hundreds of sources."
)
df = self._read_cache(**kwds)
if df is None:
df = | pd.read_csv(self._data_url, **kwds) | pandas.read_csv |
import sys
import os
import pandas as pd
import numpy as np
import scipy as sp
import camoco as co
from itertools import chain
from camoco.Tools import log
# Initialize a new log object
log = log()
def snp2gene(args):
"""
Perform SNP (locus) to candidate gene mapping
"""
if args.out != sys.stdout:
# Create any non-existant directories
if os.path.dirname(args.out) != "":
os.makedirs(os.path.dirname(args.out), exist_ok=True)
if os.path.exists(args.out) and not args.force:
print("Output for {} exists! Skipping!".format(args.out), file=sys.stderr)
return None
# Set a flag saying this is from a COB refgen
from_cob = False
# Create the refgen (option to create it from a COB)
if co.Tools.available_datasets("Expr", args.refgen):
refgen = co.COB(args.refgen).refgen
from_cob = args.refgen
elif co.Tools.available_datasets("RefGen", args.refgen):
refgen = co.RefGen(args.refgen)
# Create the GWAS object
ont = co.GWAS(args.gwas)
if "all" in args.terms:
terms = ont.iter_terms()
else:
terms = [ont[term] for term in args.terms]
data = pd.DataFrame()
results = []
for term in terms:
for window_size in args.candidate_window_size:
for flank_limit in args.candidate_flank_limit:
if "effective" in args.snp2gene:
# Map to effective
effective_loci = term.effective_loci(window_size=window_size)
elif "strongest" in args.snp2gene:
effective_loci = term.strongest_loci(
window_size=window_size,
attr=args.strongest_attr,
lowest=args.strongest_higher,
)
genes = pd.DataFrame(
[
x.as_dict()
for x in refgen.candidate_genes(
effective_loci,
flank_limit=flank_limit,
include_parent_locus=True,
include_num_siblings=True,
include_num_intervening=True,
include_rank_intervening=True,
include_SNP_distance=True,
include_parent_attrs=args.include_parent_attrs,
attrs={"Term": term.id},
)
]
)
genes["FlankLimit"] = flank_limit
genes["WindowSize"] = window_size
genes["RefGen"] = refgen.name
if from_cob != False:
genes["COB"] = from_cob
data = | pd.concat([data, genes]) | pandas.concat |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # These are queries to validate RT_deid_base_cdr
import urllib
import pandas as pd
pd.options.display.max_rows = 120
# + tags=["parameters"]
project_id = ""
com_cdr = ""
deid_base_cdr=""
pipeline=""
# -
# df will have a summary in the end
df = pd.DataFrame(columns = ['query', 'result'])
# # 1 Verify that if a person has multiple selections (Hispanic + other race) in pre_deid_com_cdr, the output in the deid_base_cdr observation table should result in two rows - one for ethnicity and one for race.
#
# test steps:
#
# - Verify the following columns in the de-id Observation table have been set to null:
# o value_as_string
# o value_source_value"
# however, this is already done in query 1, no need here anymore
# - Find person_ids in the pre_deid_com_cdr person table who have ethnicity_source_concept_id 1586147 & race_source_concept_id in (1586146 OR 1586142 OR 1586143), then verify that the output in the deid_base_cdr observation table for that person_id results in 2 rows.
# - Verify that the 2 rows have 2 different value_source_concept_id values in the deid_base_cdr observation table.
# - Verify that if a person_id in pre_deid_com_cdr has ethnicity_source_concept_id 1586147 & race_source_concept_id in (1586145 OR 1586144) in the person table, the output in the deid_base_cdr observation table for that person_id results in 2 rows.
# - Verify that if a person_id has ethnicity_source_concept_id 1586147 & race_source_concept_id with multiple selections (2 or more) in the person table, the output in the deid_base_cdr observation table for that person_id results in 2 OR MORE rows.
#
#
# ## 1.1 step1
# - Verify the following columns in the deid_cdr Observation table have been set to null:
# o value_as_string
# o value_source_value
#
# This has already been done in the first deid SQL, so it can be skipped here.
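# For reference only, a minimal sketch of that skipped check (an assumption about
# how it could be written, not the query used in the deid step); the string is
# defined but intentionally not executed here:
query_value_fields_null = f'''
SELECT COUNT (*) AS n_not_null
FROM `{project_id}.{deid_base_cdr}.observation`
WHERE value_as_string IS NOT NULL OR value_source_value IS NOT NULL
'''
# n_not_null would be expected to equal 0 if both columns were nulled.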
# # 1.2 step2 Find person_ids in the pre_deid_com_cdr person table who have ethnicity_source_concept_id 1586147 & race_source_concept_id in (1586146 OR 1586142 OR 1586143), then verify that the output in the deid_base_cdr observation table for that person_id after mapping results in 2 rows.
#
# step 3
# Verify that the 2 rows have 2 different value_source_concept_id values in the deid_base_cdr observation table.
query=f'''
WITH df1 AS (
SELECT m.research_id AS person_id
FROM `{project_id}.{pipeline}.pid_rid_mapping` m
JOIN `{project_id}.{com_cdr}.person` com
ON m.person_id = com.person_id
WHERE com.ethnicity_source_concept_id = 1586147
AND com.race_source_concept_id in (1586142, 1586143, 1586146 )
),
df2 AS (
SELECT DISTINCT person_id , COUNT (distinct value_source_concept_id ) AS countp
FROM `{project_id}.{deid_base_cdr}.observation`
WHERE observation_source_value = 'Race_WhatRaceEthnicity'
GROUP BY person_id
)
SELECT COUNT (*) AS n_not_two_rows FROM df2
WHERE person_id IN (SELECT person_id FROM df1) AND countp !=2
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.eq(0).any().any():
df = df.append({'query' : 'Query 1.2 these person_ids have 2-rows in observation', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query1.2 these person_ids have 2-rows in observation', 'result' : ''},
ignore_index = True)
df1
# ## One error in the new CDR: this person_id fails to meet the rule.
# +
query=f'''
WITH df1 AS (
SELECT m.research_id AS person_id
FROM `{project_id}.{pipeline}.pid_rid_mapping` m
JOIN `{project_id}.{com_cdr}.person` com
ON m.person_id = com.person_id
WHERE com.ethnicity_source_concept_id = 1586147
AND com.race_source_concept_id in (1586142, 1586143, 1586146 )
),
df2 AS (
SELECT DISTINCT person_id , count (distinct value_source_concept_id ) AS countp
FROM `{project_id}.{deid_base_cdr}.observation`
WHERE observation_source_value = 'Race_WhatRaceEthnicity'
GROUP BY person_id
)
SELECT distinct person_id, value_source_concept_id, value_source_value
FROM `{project_id}.{deid_base_cdr}.observation`
WHERE observation_source_value = 'Race_WhatRaceEthnicity'
AND person_id IN (SELECT person_id from df2 where countp !=2 )
AND person_id IN (SELECT person_id FROM df1)
'''
df1= | pd.read_gbq(query, dialect='standard') | pandas.read_gbq |
from functools import reduce
import os
from os.path import join, exists
from src.log import create_experiment
from joblib import Parallel, delayed, dump, load
import numpy as np
import pandas as pd
from itertools import product
from time import time
from sklearn import svm
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV, train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.svm import SVC
from tqdm import tqdm
import logging
import random
import click
import random
import warnings
from src.models import create_dir, instantiate_classifier, FUZZY_CLASSIFIERS
from src.data import (
get_sectors,
load_OHLCV_files,
create_target,
load_stock_entities,
USED_SECTORS,
)
from src.data.preparation import TRAIN_SIZE, drop_initial_nans
from src.models import lstm
logging.basicConfig(
format="%(asctime)s:%(name)s:%(levelname)s:%(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
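# Note on the metrics below: "f1_pos" restricts scoring to labels 0 and 2, which
# (given the two-threshold create_target encoding) are presumably the actionable
# down/up classes, ignoring the middle HOLD class.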
def score_classifier(y_true, y_pred) -> dict:
"""Compute performance measures."""
perf = {
"f1_macro": f1_score(y_true, y_pred, average="macro", zero_division=0),
"f1_weighted": f1_score(y_true, y_pred, average="weighted", zero_division=0),
"f1_pos": f1_score(
y_true, y_pred, labels=[0, 2], average="weighted", zero_division=0
),
}
return perf
def oversample(X_train, y_train, method):
from imblearn.over_sampling import ADASYN, SMOTE
if method == "adasyn":
return ADASYN().fit_resample(X_train, y_train)
else:
return SMOTE().fit_resample(X_train, y_train)
def get_stock_splits(
stock_df,
year,
classifier,
horizon,
l_threshold,
h_threshold,
training_type,
oversampling,
):
# Create discrete targets
targets = create_target(stock_df, horizon, l_threshold, h_threshold)
assert len(targets) == len(stock_df)
if classifier == "L3":
stock_df = stock_df.drop(columns=["Open", "High", "Low", "Close", "Volume"])
# Create training and testing split (eventually validation)
X, y = stock_df.loc[year, :], targets.loc[year]
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=TRAIN_SIZE, shuffle=False
)
if training_type == "cumulative":
past_years = stock_df.loc[: str(int(year) - 1), :]
past_targets = targets.loc[: str(int(year) - 1)]
X_train = pd.concat([past_years, X_train], axis=0)
y_train = pd.concat([past_targets, y_train], axis=0)
# use the specified year as test
# X_train = past_years
# y_train = past_targets
# X_test = stock_df.loc[year, :]
# y_test = targets.loc[year]
# Drop initial days with nans due to the technical indicators
X_train, first_valid_idx = drop_initial_nans(X_train)
y_train = y_train.iloc[first_valid_idx:]
# Drop last `horizon` days not to be predicted
X_test = X_test.iloc[:-horizon]
y_test = y_test.iloc[:-horizon]
# if a stock is trained on a single target label, e.g. all days are HOLD,
# just skip it, nothing can be learned
if y_train.unique().size <= 1:
raise RuntimeError()
# Oversampling for all but L3
if oversampling and classifier != "L3":
logger.info(f"Oversampling requested with {oversampling}")
X_train, y_train = oversample(X_train, y_train, method=oversampling)
return X_train, X_test, y_train, y_test
def test(model, X_test, y_test):
return model.predict(X_test)
def train(
X_train,
y_train,
tick: str,
classifier: str,
output_dir: str,
do_grid_search: bool,
normalize: bool,
**classifier_kwargs,
):
if do_grid_search:
logger.info(f"Grid search for {classifier} requested.")
clf, params, grid = instantiate_classifier(
classifier, return_grid=True, **classifier_kwargs
)
# update param grid keys to match the use of pipeline
if isinstance(grid, list):
grid = [{f"clf__{k}": v for k, v in g.items()} for g in grid]
else:
grid = {f"clf__{k}": v for k, v in grid.items()}
# Normalize for all classifiers but L3
if normalize:
scaler = (
MinMaxScaler() if classifier in FUZZY_CLASSIFIERS else StandardScaler()
)
pipeline = Pipeline([("scaler", scaler), ("clf", clf)])
else:
pipeline = Pipeline([("clf", clf)])
n_jobs = 1 if classifier == "MLP" else -1
gs = GridSearchCV(
pipeline,
param_grid=grid,
scoring="f1_macro",
n_jobs=n_jobs,
cv=TimeSeriesSplit(n_splits=3),
verbose=10,
)
if classifier == "L3":
gs.fit(
X_train,
y_train,
clf__column_names=X_train.columns,
clf__remove_training_dir=True,
)
else:
gs.fit(X_train, y_train)
# save the best estimator
dump(
gs.best_estimator_, join(output_dir, "models", f"best_model_{tick}.joblib")
)
# for L3 save also the rules
if classifier == "L3":
create_dir(join(output_dir, "rules", tick))
gs.best_estimator_.named_steps["clf"].save_rules(
join(output_dir, "rules", tick)
)
return gs
else:
# TODO implement this if we ever need it
raise RuntimeError("We don't want to do that now.")
@click.command()
@click.argument("output_dir", type=click.Path(file_okay=False, writable=True))
@click.argument("classifier", type=click.STRING)
@click.argument("year", type=click.STRING)
@click.option(
"--training_type", type=click.Choice(["year", "cumulative"]), default="year"
)
@click.option("--horizon", type=click.INT, default=1)
@click.option("--log_comet", is_flag=True)
@click.option("--do_grid_search", is_flag=True)
@click.option("--normalize", is_flag=True)
@click.option("--oversampling", type=click.STRING, default=None)
@click.option("--h_threshold", type=click.FLOAT, default=1)
@click.option("--l_threshold", type=click.FLOAT, default=-1)
@click.option("--seed", type=click.INT, default=42)
@click.option("--test_run", is_flag=True)
@click.option("--parallel", is_flag=True)
@click.option("--n_workers", type=click.INT, default=16)
@click.option("--seq_length", type=click.INT, default=5)
@click.option("--batch_size", type=click.INT, default=128)
@click.option("--max_epochs", type=click.INT, default=30)
@click.option("--lr", type=click.FLOAT, default=2e-5)
@click.option("--early_stop", type=click.INT, default=0)
@click.option("--gpus", type=click.INT, default=1)
@click.option("--stateful", is_flag=True)
@click.option("--reduce_lr", type=click.INT, default=0)
@click.option("--rule_sets_modifier", type=click.STRING, default="standard")
@click.option("--use_sectors", is_flag=True)
def main(
output_dir,
classifier,
year,
training_type,
horizon,
log_comet,
do_grid_search,
normalize,
oversampling,
h_threshold,
l_threshold,
seed,
test_run,
parallel,
n_workers,
seq_length,
batch_size,
max_epochs,
lr,
early_stop,
gpus,
stateful,
reduce_lr,
rule_sets_modifier,
use_sectors,
):
hparams = locals()
random.seed(seed)
create_dir(output_dir)
create_dir(join(output_dir, "models"))
if classifier == "L3":
create_dir(join(output_dir, "rules"))
in_dir = (
join("data", "processed", "SP500_technical")
if classifier != "L3"
else join("data", "processed", "SP500_technical_discretized", "DENSE")
)
stock_by_tick = load_OHLCV_files(in_dir)
logger.info(f"Loaded {len(stock_by_tick)} stocks")
# sort ticks alphabetically
ticks = sorted(list(stock_by_tick.keys()))
if test_run:
stock_by_tick = {
"AAPL": stock_by_tick["AAPL"],
"MSFT": stock_by_tick["MSFT"],
"AMZN": stock_by_tick["AMZN"],
}
experiment = None
if log_comet:
exp = create_experiment()
exp.add_tag("training")
exp.log_parameters(hparams)
exp.log_other("n_stocks", len(stock_by_tick))
classifier_args = dict()
if classifier == "L3":
classifier_args["rule_sets_modifier"] = rule_sets_modifier
if classifier == "MLP":
classifier_args["random_state"] = seed
if classifier == "LSTM":
classifier_args["seed"] = seed
if not use_sectors:
"""Run trading separately per stock."""
results = list()
for tick in tqdm(ticks, desc="Stocks"):
stock_df = stock_by_tick[tick]
X_train, X_test, y_train, y_test = get_stock_splits(
stock_df,
year,
classifier,
horizon,
l_threshold,
h_threshold,
training_type,
oversampling,
)
if classifier != "LSTM":
model = train(
X_train,
y_train,
tick,
classifier,
output_dir,
do_grid_search,
normalize,
**classifier_args,
)
y_pred = model.predict(X_test)
best_params = model.best_params_
else:
# LSTM
logger.info("Disabling model and experiment logging with LSTM.")
save_model = False
comet_experiment = None
best_model_path = lstm.train(
X_train,
y_train,
3,
seq_length,
batch_size,
max_epochs,
lr,
reduce_lr,
gpus,
seed,
early_stop,
stateful,
comet_experiment=comet_experiment,
model_dir=join(output_dir, "models"),
tick=tick,
save_model=save_model,
)
y_pred = lstm.test(
best_model_path,
X_train,
X_test,
y_train,
y_test,
seq_length,
batch_size,
)
best_params = None
os.remove(best_model_path)
test_perf = score_classifier(y_test, y_pred)
results.append((y_test, test_perf, y_pred, best_params))
else:
"""Group stocks into sectors. We keep only the most populated ones."""
if log_comet:
exp.add_tag("sectors")
logger.info(f"Training on {len(USED_SECTORS)} sectors")
logger.info(f"Sectors: {USED_SECTORS}")
stocks = load_stock_entities(join("data", "raw"))
sectors = get_sectors(stocks)
results = list()
process_ticks = list()
for sec in tqdm(USED_SECTORS, desc="Sectors"):
curr_ticks = sectors.loc[sectors == sec].index.values
process_ticks.extend(curr_ticks)
Xtr, Xte, ytr, yte = list(), list(), list(), list()
for tick in curr_ticks:
stock_df = stock_by_tick[tick]
X_train, X_test, y_train, y_test = get_stock_splits(
stock_df,
year,
classifier,
horizon,
l_threshold,
h_threshold,
training_type,
oversampling,
)
Xtr.append(X_train)
Xte.append(X_test)
ytr.append(y_train)
yte.append(y_test)
X_train = pd.concat(Xtr, axis=0)
y_train = pd.concat(ytr, axis=0)
logger.info(f"Dimensions of this sector: X_train: {X_train.shape}")
if classifier != "LSTM":
model = train(
X_train=X_train,
y_train=y_train,
tick=sec,
classifier=classifier,
output_dir=output_dir,
do_grid_search=do_grid_search,
normalize=normalize,
**classifier_args,
)
# Predict one stock at a time within the current sector
for tick, X_test, y_test in tqdm(
zip(curr_ticks, Xte, yte),
desc="Stocks",
leave=False,
total=len(curr_ticks),
):
y_pred = model.predict(X_test)
test_perf = score_classifier(y_test, y_pred)
results.append((y_test, test_perf, y_pred, model.best_params_))
else:
# LSTM
logger.info("Disabling model and experiment logging with LSTM.")
save_model = False
comet_experiment = None
best_model_path = lstm.train(
X_train=X_train,
y_train=y_train,
num_classes=3,
seq_length=seq_length,
batch_size=batch_size,
max_epochs=max_epochs,
lr=lr,
reduce_lr=reduce_lr,
gpus=gpus,
seed=seed,
early_stop=early_stop,
stateful=stateful,
comet_experiment=comet_experiment,
model_dir=join(output_dir, "models"),
tick=sec,
save_model=save_model,
)
# Predict one stock at a time within the current sector
for tick, X_test, y_test in tqdm(
zip(curr_ticks, Xte, yte),
desc="Stocks",
leave=False,
total=len(curr_ticks),
):
y_pred = lstm.test(
best_model_path,
X_train,
X_test,
y_train,
y_test,
seq_length,
batch_size,
)
test_perf = score_classifier(y_test, y_pred)
results.append((y_test, test_perf, y_pred, None))
os.remove(best_model_path)
logger.info(f"Processed {len(results)} stocks")
ticks = process_ticks
# Save all the results
create_dir(join(output_dir, "preds"))
y_tests = [r[0] for r in results]
y_tests = | pd.concat(y_tests, axis=1, keys=ticks) | pandas.concat |
'''
Program: LBplot v3.1
Author: <NAME>
Released: 06/10/2020
Available in <https://github.com/HectorKroes/LBplot>
'''
##REFERENCES##
References = ('''
-<NAME>., & <NAME>. (1934). The Determination of Enzyme Dissociation Constants. Journal of the American Chemical Society, 56(3), 658–666. doi:10.1021/ja01318a036
-<NAME>., and <NAME>. (1913) Die Kinetik der Invertinwirkung. Biochem. Z. 49, 333–369
-<NAME>.; <NAME>.; and <NAME>. Statistical Distributions, 3rd ed. New York: Wiley, p. 12-14, 2000.
-<NAME>. "Hypothesis Testing." From MathWorld--A Wolfram Web Resource. https://mathworld.wolfram.com/HypothesisTesting.html
-Weisstein, <NAME>. "Standard Error." From MathWorld--A Wolfram Web Resource. https://mathworld.wolfram.com/StandardError.html
-Weisstein, <NAME>. "Correlation Coefficient." From MathWorld--A Wolfram Web Resource. https://mathworld.wolfram.com/CorrelationCoefficient.html ''')
##IMPORTS##
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import time, math, copy, os, sys, codecs, datetime, calendar
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import NullFormatter
import matplotlib.pyplot as plt
from datetime import datetime
import PySimpleGUI as sg
from scipy import stats
import pandas as pd
import numpy as np
##FUNCTIONS##
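# erro(x, z) returns the variance of x: the population variance when z = 0 and the
# sample variance when z = 1 (sum of squared deviations from the mean divided by len(x) - z).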
def erro(x, z):
a = []
xm = sum(x)/len(x)
for r in range(0,len(x)):
at = ((x[r]-xm)**2)
a.append(at)
atot = (sum(a)/(len(x)-z))
return atot
def floatit(a):
b = []
for aa in a:
b.append(float(aa))
return b
def breaqui():
global sm
sm = 'end'
global st
st = 'end'
def format(a, j):
c = []
for b in a:
c.append(j.format(float(b)))
return c
def DI():
layout1 = [ [sg.Image(os.getcwd()+os.sep+'Logo.png')],
[sg.Text('')],
[sg.Button('Start new project', size = (40,1))],
[sg.Button('Load previous projects', size = (40,1))],
[sg.Button('References', size = (40,1))],
[sg.Button('Credits', size = (40,1))],
[sg.Button('End program', size = (40,1))],
[sg.Text('')]]
global de
de = sg.Window('LBplot', layout1, element_justification = 'center')
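# pdftable renders a pandas DataFrame as a matplotlib table and saves it to a PDF
# whose name is requested from the user through a small PySimpleGUI dialog;
# exceltable below does the same but writes an .xlsx file instead.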
def pdftable(df):
RNG3 = [[sg.Text("What should be the archive name?")],
[sg.InputText('', size = (51,1), key= '-archnam-')],
[sg.Text(" "), sg.Button('Continue', size = (20,1), key = '-namt-')]]
dp = sg.Window('LBplot', RNG3, element_justification = 'left')
eventp, valuep = dp.read()
namt = ((str(valuep['-archnam-']).replace('.pdf', ''))+'.pdf')
dp.close()
fig, ax =plt.subplots(figsize=(12,4))
ax.axis('tight')
ax.axis('off')
the_table = ax.table(cellText=df.values, colLabels=df.columns, loc= 'center')
pp = PdfPages(arq+namt)
pp.savefig(fig, bbox_inches='tight')
pp.close()
RNG4 = [[sg.Text(namt+" created successfully!")],
[sg.Button('Continue', size = (21,1), key = '-rnm2-')]]
dc = sg.Window('LBplot', RNG4, element_justification = 'center')
eventc, valuec = dc.read()
dc.close()
plt.clf()
plt.close()
def exceltable(df):
RNG3 = [[sg.Text("What should be the archive name?")],
[sg.InputText('', size = (51,1), key= '-archnam-')],
[sg.Text(" "), sg.Button('Continue', size = (20,1), key = '-namt-')]]
dp = sg.Window('LBplot', RNG3, element_justification = 'left')
eventp, valuep = dp.read()
namt = ((str(valuep['-archnam-']).replace('.xlsx', ''))+'.xlsx')
dp.close()
df.to_excel (arq+namt, index = False, header=True)
RNG4 = [[sg.Text(namt+" created successfully!")],
[sg.Button('Continue', size = (21,1), key = '-rnm2-')]]
dc = sg.Window('LBplot', RNG4, element_justification = 'center')
eventc, valuec = dc.read()
dc.close()
plt.clf()
plt.close()
def savefig(ext):
RNG1 = [[sg.Text("What should be the archive name?")],
[sg.InputText('', size = (51,1), key= '-archnam-')],
[sg.Text(" "), sg.Button('Continue', size = (20,1), key = '-namt-')]]
dp = sg.Window('LBplot', RNG1, element_justification = 'left')
kt = 'a'
while kt == 'a':
eventp, valuep = dp.read()
if eventp == '-namt-':
namt = ((str(valuep['-archnam-']).replace(ext, ''))+ext)
dp.close()
plt.savefig(arq+namt)
RNG2 = [[sg.Text(namt+" created successfully!")],
[sg.Button('Continue', size = (21,1), key = '-rnm2-')]]
dc = sg.Window('LBplot', RNG2, element_justification = 'center')
kd = 'a'
while kd == 'a':
eventc, valuec = dc.read()
if eventc == '-rnm2-':
dc.close()
kt='b'
kd='b'
break
elif event in (None, 'Exit'):
dc.close()
kt='b'
kd='b'
breaqui()
break
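# pltnow draws the Lineweaver-Burk scatter (1/[S] on x, 1/V0 on y) together with the
# fitted regression line; u and v are the user-supplied units for V0 and [S] used in
# the axis labels.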
def pltnow(title, xx, yy, mymodel, x, uV, u, v):
plt.plot(x, mymodel, '-r')
plt.plot([xx], [yy], 'bo')
plt.xlim(0,)
plt.ylim(0,)
plt.suptitle(title)
plt.xlabel('1/[S] '+'(1/'+v+')', color='#298A08')
if len(uV) == 1:
plt.ylabel('1/V0 '+'(1/'+u+')', color='#B40404')
elif len(uV) == 2:
plt.ylabel('1/V0 '+'('+str(uV[1])+'/'+str(uV[0])+')', color='#B40404')
plt.grid()
def bulk():
x = copy.copy(oS)
y = copy.copy(oV0)
xx = copy.copy(x)
yy = copy.copy(y)
slope, intercept, rv, pv, std_err = stats.linregress(x, y)
def myfunc(x):
return slope * x + intercept
errorg2 = erro(x, 0)
errorg = (math.sqrt(errorg2))
errors2 = erro(x, 1)
errors = (math.sqrt(errors2))
x.append(0)
x.append(1.1*(float(max(x))))
mymodel = list(map(myfunc, x))
title = 'Lineweaver-Burk double reciprocal plot'
sm = 'start'
while sm == 'start':
layout3 = [[sg.T('DATA SET', size=(103,1), justification='center')]]
headings1 = [' ','V0','1/V0','[S]','1/[S]']
leg1 = [sg.T(a, size=(20,1), background_color='white', justification='center', pad=(1,1)) for a in headings1]
layout3.append(leg1)
for a in range(len(oV0)):
data1 = [ a+1, "{:.2e}".format(float(tmpV0[a])), "{:.2e}".format(float(oV0[a])), "{:.2e}".format(float(tmpS[a])), "{:.2e}".format(float(oS[a]))]
row = [sg.T(a, size=(20,1), background_color='white', justification='center', pad=(1,1)) for a in data1]
layout3.append(row)
line = [sg.T('', size=(20,1), justification='center', pad=(1,1))]
layout3.append(line)
title2 = [sg.T('KM AND VMAX', size=(103,1), justification='center', pad=(1,1))]
layout3.append(title2)
headings2 =['Michaelis constant (Km)', 'Maximum Reaction Rate (Vmax)', 'Slope (Km/Vmax)']
leg2 = [sg.T(a, size=(34,1), background_color='white', justification='center', pad=(1,1)) for a in headings2]
layout3.append(leg2)
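        # Lineweaver-Burk: 1/V0 = (Km/Vmax) * (1/[S]) + 1/Vmax, so from the fitted line
        # y = slope * x + intercept we get Vmax = 1/intercept and Km = slope/intercept.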
data2 = ["{:.5e}".format(slope/intercept), "{:.5e}".format(1/intercept), "{:.5e}".format(slope)]
row2 = [sg.T(a, size=(34,1), background_color='white', justification='center', pad=(1,1)) for a in data2]
layout3.append(row2)
line = [sg.T('', size=(15,1), justification='center', pad=(1,1))]
layout3.append(line)
title3 = [sg.T('LINEAR REGRESSION', size=(103,1), justification='center', pad=(1,1))]
layout3.append(title3)
headings3 = ['Correlation Coefficient (R)', 'Coefficient of Determination (R^2)', 'Standard error', 'P-Value']
leg3 = [sg.T(a, size=(25,1), background_color='white', justification='center', pad=(1,1)) for a in headings3]
layout3.append(leg3)
data3 = ["{:.5e}".format(rv), "{:.5e}".format(rv**2), "{:.5e}".format(std_err), "{:.5e}".format(pv)]
row3 = [sg.T(a, size=(25,1), background_color='white', justification='center', pad=(1,1)) for a in data3]
layout3.append(row3)
line = [sg.T('', size=(15,1), justification='center', pad=(1,1))]
layout3.append(line)
title4 = [sg.T('VARIANCES AND DEVIATIONS', size=(103,1), justification='center', pad=(1,1))]
layout3.append(title4)
headings4 = ['Population Variance (σ^2)', 'Population Standard Deviation (σ)', 'Sample Variance (S^2)', 'Sample Standard Deviation (S)']
leg4 = [sg.T(a, size=(25,1), background_color='white', justification='center', pad=(1,1)) for a in headings4]
layout3.append(leg4)
data4 = ["{:.5e}".format(errorg2), "{:.5e}".format(errorg), "{:.5e}".format(errors2), "{:.5e}".format(errors)]
row4 = [sg.T(a, size=(25,1), background_color='white', justification='center', pad=(1,1)) for a in data4]
layout3.append(row4)
line = [sg.T('', size=(15,1), justification='center', pad=(1,1))]
layout3.append(line)
GBM = ['PLOT', ['&Show plot::-SHP-', '&Rename graph::-RNG-', '&Save as pdf::-GPDF-', '&Save as png::-GPNG-', '&Save as jpg::-GJPG-']]
DSM = ['DSM', ['&Save as pdf::SPNG', '&Export to excel::-EXTE-']]
OPT = ['OPT', ['&Return to main menu::-RTMM-', '&End program::-ENP-']]
scroll = [[sg.Text('Project: '+prjname), sg.Text('Date of creation: '+doc)],
[sg.Col(layout3, size=(846, 500), scrollable=True, vertical_scroll_only=True)],
[sg.ButtonMenu('PLOT', GBM, key='-GBM-', size=(38,1)), sg.ButtonMenu('DATA SET', DSM, key='-DSM-', size=(38,1)), sg.ButtonMenu('KM AND VMAX', DSM, key='-KAV-', size=(38,1))],
[sg.ButtonMenu('LINEAR REGRESSION', DSM, key='-LNR-', size=(38,1)), sg.ButtonMenu('VARIANCES AND DEVIATIONS', DSM, key='-VAD-', size=(38,1)), sg.ButtonMenu('OPTIONS', OPT, key='-OPT-', size=(38,1))]]
dg = sg.FlexForm('LBplot', scroll)
eventg, valueg = dg.read()
if valueg == {'-GBM-': 'Show plot::-SHP-', '-DSM-': None, '-KAV-': None, '-LNR-': None, '-VAD-': None, '-OPT-': None}:
print('a')
dg.close()
plt.subplot()
            pltnow(title, xx, yy, mymodel, x, uV, u, v)
plt.gca().yaxis.set_minor_formatter(NullFormatter())
plt.subplots_adjust(top=0.90, bottom=0.10, left=0.14, right=0.95, hspace=0.25,
wspace=0.35)
fig = plt.gcf()
figure_x, figure_y, figure_w, figure_h = fig.bbox.bounds
def draw_figure(canvas, figure, loc=(0, 0)):
figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)
figure_canvas_agg.draw()
figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)
return figure_canvas_agg
layout = [[sg.Canvas(size=(figure_w, figure_h), key='canvas')],
[sg.Button('Continue', size = (21,1), key = '-grphr-')]]
window = sg.Window('LBplot', layout, finalize=True, element_justification = 'center')
fig_canvas_agg = draw_figure(window['canvas'].TKCanvas, fig)
event, values = window.read()
if event in (None, 'Exit'):
breaqui()
window.close()
break
elif event == '-grphr-':
window.close()
plt.clf()
plt.close()
elif valueg == {'-GBM-': 'Rename graph::-RNG-', '-DSM-': None, '-KAV-': None, '-LNR-': None, '-VAD-': None, '-OPT-': None}:
dg.close()
RNG1 = [[sg.Text("What should be the graph's title?")],
[sg.InputText('', size = (51,1), key= '-title-')],
[sg.Text(" "), sg.Button('Continue', size = (20,1), key = '-rnm1-')]]
dh = sg.Window('LBplot', RNG1, element_justification = 'left')
kt = 'a'
while kt == 'a':
eventh, valueh = dh.read()
if eventh == '-rnm1-':
title = str(valueh['-title-'])
dh.close()
RNG2 = [[sg.Text(title+" is now the graph's title!")],
[sg.Button('Continue', size = (21,1), key = '-rnm2-')]]
dj = sg.Window('LBplot', RNG2, element_justification = 'center')
kd = 'a'
while kd == 'a':
eventj, valuej = dj.read()
if eventj == '-rnm2-':
dj.close()
kt='b'
kd='b'
break
elif valueg == {'-GBM-': 'Save as pdf::-GPDF-', '-DSM-': None, '-KAV-': None, '-LNR-': None, '-VAD-': None, '-OPT-': None}:
pltnow(title, xx, yy, mymodel, x, uV, u, v)
dg.close()
savefig('.pdf')
plt.clf()
plt.close()
elif valueg == {'-GBM-': 'Save as png::-GPNG-', '-DSM-': None, '-KAV-': None, '-LNR-': None, '-VAD-': None, '-OPT-': None}:
pltnow(title, xx, yy, mymodel, x, uV, u, v)
dg.close()
savefig('.png')
plt.clf()
plt.close()
elif valueg == {'-GBM-': 'Save as jpg::-GJPG-', '-DSM-': None, '-KAV-': None, '-LNR-': None, '-VAD-': None, '-OPT-': None}:
pltnow(title, xx, yy, mymodel, x, uV, u, v)
dg.close()
savefig('.jpg')
plt.clf()
plt.close()
elif eventg == '-DSM-':
plt.clf()
cap=[]
V0j = format(tmpV0, "{:.2e}")
oV0j = format(oV0, "{:.2e}")
Sj = format(tmpS, "{:.2e}")
oSj = format(oS, "{:.2e}")
for bap in range(0, len(tmpV0)):
cap.append(str(bap+1))
data = {'': cap,
'V0': V0j,
'1/V0': oV0j,
'[S]': Sj,
'1/[S]': oSj
}
df = | pd.DataFrame(data, columns = ['','V0','1/V0','[S]','1/[S]']) | pandas.DataFrame |
from functools import partial
import os
import unittest
from nose.tools import assert_equal, assert_list_equal, nottest, raises
from py_stringmatching.tokenizer.delimiter_tokenizer import DelimiterTokenizer
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
from six import iteritems
import pandas as pd
from py_stringsimjoin.join.cosine_join import cosine_join
from py_stringsimjoin.join.dice_join import dice_join
from py_stringsimjoin.join.jaccard_join import jaccard_join
from py_stringsimjoin.join.overlap_coefficient_join import overlap_coefficient_join
from py_stringsimjoin.join.overlap_join import overlap_join
from py_stringsimjoin.utils.converter import dataframe_column_to_str
from py_stringsimjoin.utils.generic_helper import COMP_OP_MAP, \
remove_redundant_attrs
from py_stringsimjoin.utils.simfunctions import get_sim_function
JOIN_FN_MAP = {'COSINE': cosine_join,
'DICE': dice_join,
'JACCARD': jaccard_join,
'OVERLAP_COEFFICIENT': overlap_coefficient_join}
DEFAULT_COMP_OP = '>='
DEFAULT_L_OUT_PREFIX = 'l_'
DEFAULT_R_OUT_PREFIX = 'r_'
@nottest
def test_valid_join(scenario, sim_measure_type, args, convert_to_str=False):
(ltable_path, l_key_attr, l_join_attr) = scenario[0]
(rtable_path, r_key_attr, r_join_attr) = scenario[1]
join_fn = JOIN_FN_MAP[sim_measure_type]
# load input tables for the tests.
ltable = pd.read_csv(os.path.join(os.path.dirname(__file__),
ltable_path))
rtable = pd.read_csv(os.path.join(os.path.dirname(__file__),
rtable_path))
if convert_to_str:
dataframe_column_to_str(ltable, l_join_attr, inplace=True)
dataframe_column_to_str(rtable, r_join_attr, inplace=True)
missing_pairs = set()
# if allow_missing flag is set, compute missing pairs.
if len(args) > 4 and args[4]:
for l_idx, l_row in ltable.iterrows():
for r_idx, r_row in rtable.iterrows():
if (pd.isnull(l_row[l_join_attr]) or
pd.isnull(r_row[r_join_attr])):
missing_pairs.add(','.join((str(l_row[l_key_attr]),
str(r_row[r_key_attr]))))
# remove rows with missing value in join attribute and create new dataframes
# consisting of rows with non-missing values.
ltable_not_missing = ltable[pd.notnull(ltable[l_join_attr])].copy()
rtable_not_missing = rtable[pd.notnull(rtable[r_join_attr])].copy()
if len(args) > 3 and (not args[3]):
ltable_not_missing = ltable_not_missing[ltable_not_missing.apply(
lambda row: len(args[0].tokenize(str(row[l_join_attr]))), 1) > 0]
rtable_not_missing = rtable_not_missing[rtable_not_missing.apply(
lambda row: len(args[0].tokenize(str(row[r_join_attr]))), 1) > 0]
# generate cartesian product to be used as candset
ltable_not_missing['tmp_join_key'] = 1
rtable_not_missing['tmp_join_key'] = 1
cartprod = pd.merge(ltable_not_missing[[l_key_attr,
l_join_attr,
'tmp_join_key']],
rtable_not_missing[[r_key_attr,
r_join_attr,
'tmp_join_key']],
on='tmp_join_key').drop('tmp_join_key', 1)
    ltable_not_missing = ltable_not_missing.drop('tmp_join_key', 1)
    rtable_not_missing = rtable_not_missing.drop('tmp_join_key', 1)
sim_func = get_sim_function(sim_measure_type)
# apply sim function to the entire cartesian product to obtain
# the expected set of pairs satisfying the threshold.
cartprod['sim_score'] = cartprod.apply(lambda row: round(sim_func(
args[0].tokenize(str(row[l_join_attr])),
args[0].tokenize(str(row[r_join_attr]))), 4),
axis=1)
comp_fn = COMP_OP_MAP[DEFAULT_COMP_OP]
# Check for comp_op in args.
if len(args) > 2:
comp_fn = COMP_OP_MAP[args[2]]
expected_pairs = set()
for idx, row in cartprod.iterrows():
if comp_fn(float(row['sim_score']), args[1]):
expected_pairs.add(','.join((str(row[l_key_attr]),
str(row[r_key_attr]))))
expected_pairs = expected_pairs.union(missing_pairs)
orig_return_set_flag = args[0].get_return_set()
# use join function to obtain actual output pairs.
actual_candset = join_fn(ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
*args)
assert_equal(args[0].get_return_set(), orig_return_set_flag)
expected_output_attrs = ['_id']
l_out_prefix = DEFAULT_L_OUT_PREFIX
r_out_prefix = DEFAULT_R_OUT_PREFIX
# Check for l_out_prefix in args.
if len(args) > 7:
l_out_prefix = args[7]
expected_output_attrs.append(l_out_prefix + l_key_attr)
# Check for r_out_prefix in args.
if len(args) > 8:
r_out_prefix = args[8]
expected_output_attrs.append(r_out_prefix + r_key_attr)
# Check for l_out_attrs in args.
if len(args) > 5:
if args[5]:
l_out_attrs = remove_redundant_attrs(args[5], l_key_attr)
for attr in l_out_attrs:
expected_output_attrs.append(l_out_prefix + attr)
# Check for r_out_attrs in args.
if len(args) > 6:
if args[6]:
r_out_attrs = remove_redundant_attrs(args[6], r_key_attr)
for attr in r_out_attrs:
expected_output_attrs.append(r_out_prefix + attr)
# Check for out_sim_score in args.
if len(args) > 9:
if args[9]:
expected_output_attrs.append('_sim_score')
else:
expected_output_attrs.append('_sim_score')
# verify whether the output table has the necessary attributes.
assert_list_equal(list(actual_candset.columns.values),
expected_output_attrs)
actual_pairs = set()
for idx, row in actual_candset.iterrows():
actual_pairs.add(','.join((str(row[l_out_prefix + l_key_attr]),
str(row[r_out_prefix + r_key_attr]))))
# verify whether the actual pairs and the expected pairs match.
assert_equal(len(expected_pairs), len(actual_pairs))
common_pairs = actual_pairs.intersection(expected_pairs)
assert_equal(len(common_pairs), len(expected_pairs))
def test_set_sim_join():
# data to be tested.
test_scenario_1 = [(os.sep.join(['data', 'table_A.csv']), 'A.ID', 'A.name'),
(os.sep.join(['data', 'table_B.csv']), 'B.ID', 'B.name')]
data = {'TEST_SCENARIO_1' : test_scenario_1}
# similarity measures to be tested.
sim_measure_types = ['COSINE', 'DICE', 'JACCARD', 'OVERLAP_COEFFICIENT']
# similarity thresholds to be tested.
thresholds = {'JACCARD' : [0.3, 0.5, 0.7, 0.85, 1],
'COSINE' : [0.3, 0.5, 0.7, 0.85, 1],
'DICE' : [0.3, 0.5, 0.7, 0.85, 1],
'OVERLAP_COEFFICIENT' : [0.3, 0.5, 0.7, 0.85, 1]}
# tokenizers to be tested.
tokenizers = {'SPACE_DELIMITER': DelimiterTokenizer(delim_set=[' '],
return_set=True),
'2_GRAM': QgramTokenizer(qval=2, return_set=True),
'3_GRAM': QgramTokenizer(qval=3, return_set=True)}
# Test each combination of similarity measure, threshold and tokenizer for different test scenarios.
for label, scenario in iteritems(data):
for sim_measure_type in sim_measure_types:
for threshold in thresholds.get(sim_measure_type):
for tok_type, tok in iteritems(tokenizers):
test_function = partial(test_valid_join, scenario,
sim_measure_type, (tok, threshold))
test_function.description = 'Test ' + sim_measure_type + \
' with ' + str(threshold) + ' threshold and ' + \
tok_type + ' tokenizer for ' + label + '.'
yield test_function,
# Test each similarity measure with different comparison operators.
for sim_measure_type in sim_measure_types:
for comp_op in ['>', '=']:
test_function = partial(test_valid_join, test_scenario_1,
sim_measure_type,
(tokenizers['SPACE_DELIMITER'],
0.3, comp_op, False))
test_function.description = 'Test ' + sim_measure_type + \
' with comp_op ' + comp_op + '.'
yield test_function,
# Test each similarity measure with allow_missing set to True.
for sim_measure_type in sim_measure_types:
test_function = partial(test_valid_join, test_scenario_1,
sim_measure_type,
(tokenizers['SPACE_DELIMITER'],
0.7, '>=', False, True))
test_function.description = 'Test ' + sim_measure_type + \
' with allow_missing set to True.'
yield test_function,
# Test each similarity measure with output attributes added.
for sim_measure_type in sim_measure_types:
test_function = partial(test_valid_join, test_scenario_1,
sim_measure_type,
(tokenizers['SPACE_DELIMITER'],
0.3, '>=', False, False,
['A.ID', 'A.birth_year', 'A.zipcode'],
['B.ID', 'B.name', 'B.zipcode']))
test_function.description = 'Test ' + sim_measure_type + \
' with output attributes.'
yield test_function,
# Test each similarity measure with a different output prefix.
for sim_measure_type in sim_measure_types:
test_function = partial(test_valid_join, test_scenario_1,
sim_measure_type,
(tokenizers['SPACE_DELIMITER'],
0.7, '>=', False, False,
['A.birth_year', 'A.zipcode'],
['B.name', 'B.zipcode'],
'ltable.', 'rtable.'))
test_function.description = 'Test ' + sim_measure_type + \
' with output attributes and prefix.'
yield test_function,
# Test each similarity measure with output_sim_score disabled.
for sim_measure_type in sim_measure_types:
test_function = partial(test_valid_join, test_scenario_1,
sim_measure_type,
(tokenizers['SPACE_DELIMITER'],
0.7, '>=', False, False,
['A.birth_year', 'A.zipcode'],
['B.name', 'B.zipcode'],
'ltable.', 'rtable.',
False))
test_function.description = 'Test ' + sim_measure_type + \
' with sim_score disabled.'
yield test_function,
# Test each similarity measure with n_jobs above 1.
for sim_measure_type in sim_measure_types:
test_function = partial(test_valid_join, test_scenario_1,
sim_measure_type,
(tokenizers['SPACE_DELIMITER'],
0.3, '>=', False, False,
['A.birth_year', 'A.zipcode'],
['B.name', 'B.zipcode'],
'ltable.', 'rtable.',
False, 2))
test_function.description = 'Test ' + sim_measure_type + \
' with n_jobs above 1.'
yield test_function,
# scenario where join attributes are of type int
test_scenario_2 = [(os.sep.join(['data', 'table_A.csv']), 'A.ID', 'A.zipcode'),
(os.sep.join(['data', 'table_B.csv']), 'B.ID', 'B.zipcode')]
# Test each similarity measure with join attribute of type int.
for sim_measure_type in sim_measure_types:
test_function = partial(test_valid_join, test_scenario_2,
sim_measure_type,
(tokenizers['2_GRAM'],
0.3), True)
test_function.description = 'Test ' + sim_measure_type + \
' with join attribute of type int.'
yield test_function,
# scenario where join attributes are of type float
test_scenario_3 = [(os.sep.join(['data', 'table_A.csv']), 'A.ID', 'A.hourly_wage'),
(os.sep.join(['data', 'table_B.csv']), 'B.ID', 'B.hourly_wage')]
# Test each similarity measure with join attribute of type float.
for sim_measure_type in sim_measure_types:
test_function = partial(test_valid_join, test_scenario_3,
sim_measure_type,
(tokenizers['2_GRAM'],
0.3), True)
test_function.description = 'Test ' + sim_measure_type + \
' with join attribute of type float.'
yield test_function,
# Test each similarity measure with a tokenizer with return_set flag set to False.
for sim_measure_type in sim_measure_types:
tok = QgramTokenizer(2)
test_function = partial(test_valid_join, test_scenario_1,
sim_measure_type, (tok, 0.3))
test_function.description = 'Test ' + sim_measure_type + \
' with a tokenizer with return_set flag set to False .'
yield test_function,
# Test each similarity measure with allow_empty set to True.
for sim_measure_type in sim_measure_types:
test_function = partial(test_valid_join, test_scenario_1,
sim_measure_type,
(tokenizers['SPACE_DELIMITER'],
0.7, '>=', True))
test_function.description = 'Test ' + sim_measure_type + \
' with allow_empty set to True.'
yield test_function,
# Test each similarity measure with allow_empty set to True and with output attributes.
for sim_measure_type in sim_measure_types:
test_function = partial(test_valid_join, test_scenario_1,
sim_measure_type,
(tokenizers['SPACE_DELIMITER'],
0.7, '>=', True, False,
['A.name'], ['B.name']))
test_function.description = 'Test ' + sim_measure_type + \
' with allow_empty set to True and with output attributes.'
yield test_function,
class JaccardJoinInvalidTestCases(unittest.TestCase):
def setUp(self):
self.A = pd.DataFrame([{'A.id':1, 'A.attr':'hello', 'A.int_attr':5}])
self.B = pd.DataFrame([{'B.id':1, 'B.attr':'world', 'B.int_attr':6}])
self.tokenizer = DelimiterTokenizer(delim_set=[' '], return_set=True)
self.threshold = 0.8
@raises(TypeError)
def test_jaccard_join_invalid_ltable(self):
jaccard_join([], self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(TypeError)
def test_jaccard_join_invalid_rtable(self):
jaccard_join(self.A, [], 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_jaccard_join_invalid_l_key_attr(self):
jaccard_join(self.A, self.B, 'A.invalid_id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_jaccard_join_invalid_r_key_attr(self):
jaccard_join(self.A, self.B, 'A.id', 'B.invalid_id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_jaccard_join_invalid_l_join_attr(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.invalid_attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_jaccard_join_invalid_r_join_attr(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.invalid_attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_jaccard_join_numeric_l_join_attr(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.int_attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_jaccard_join_numeric_r_join_attr(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.int_attr',
self.tokenizer, self.threshold)
@raises(TypeError)
def test_jaccard_join_invalid_tokenizer(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
[], self.threshold)
@raises(AssertionError)
def test_jaccard_join_invalid_threshold_above(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, 1.5)
@raises(AssertionError)
def test_jaccard_join_invalid_threshold_below(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, -0.1)
@raises(AssertionError)
def test_jaccard_join_invalid_threshold_zero(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, 0)
@raises(AssertionError)
def test_jaccard_join_invalid_comp_op_lt(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '<')
@raises(AssertionError)
def test_jaccard_join_invalid_comp_op_le(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '<=')
@raises(AssertionError)
def test_jaccard_join_invalid_l_out_attr(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '>=', True, False,
['A.invalid_attr'], ['B.attr'])
@raises(AssertionError)
def test_jaccard_join_invalid_r_out_attr(self):
jaccard_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '>=', True, False,
['A.attr'], ['B.invalid_attr'])
class CosineJoinInvalidTestCases(unittest.TestCase):
def setUp(self):
self.A = pd.DataFrame([{'A.id':1, 'A.attr':'hello', 'A.int_attr':5}])
self.B = pd.DataFrame([{'B.id':1, 'B.attr':'world', 'B.int_attr':6}])
self.tokenizer = DelimiterTokenizer(delim_set=[' '], return_set=True)
self.threshold = 0.8
@raises(TypeError)
def test_cosine_join_invalid_ltable(self):
cosine_join([], self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(TypeError)
def test_cosine_join_invalid_rtable(self):
cosine_join(self.A, [], 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_cosine_join_invalid_l_key_attr(self):
cosine_join(self.A, self.B, 'A.invalid_id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_cosine_join_invalid_r_key_attr(self):
cosine_join(self.A, self.B, 'A.id', 'B.invalid_id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_cosine_join_invalid_l_join_attr(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.invalid_attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_cosine_join_invalid_r_join_attr(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.invalid_attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_cosine_join_numeric_l_join_attr(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.int_attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_cosine_join_numeric_r_join_attr(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.int_attr',
self.tokenizer, self.threshold)
@raises(TypeError)
def test_cosine_join_invalid_tokenizer(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
[], self.threshold)
@raises(AssertionError)
def test_cosine_join_invalid_threshold_above(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, 1.5)
@raises(AssertionError)
def test_cosine_join_invalid_threshold_below(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, -0.1)
@raises(AssertionError)
def test_cosine_join_invalid_threshold_zero(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, 0)
@raises(AssertionError)
def test_cosine_join_invalid_comp_op_lt(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '<')
@raises(AssertionError)
def test_cosine_join_invalid_comp_op_le(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '<=')
@raises(AssertionError)
def test_cosine_join_invalid_l_out_attr(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '>=', True, False,
['A.invalid_attr'], ['B.attr'])
@raises(AssertionError)
def test_cosine_join_invalid_r_out_attr(self):
cosine_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '>=', True, False,
['A.attr'], ['B.invalid_attr'])
class DiceJoinInvalidTestCases(unittest.TestCase):
def setUp(self):
self.A = pd.DataFrame([{'A.id':1, 'A.attr':'hello', 'A.int_attr':5}])
self.B = pd.DataFrame([{'B.id':1, 'B.attr':'world', 'B.int_attr':6}])
self.tokenizer = DelimiterTokenizer(delim_set=[' '], return_set=True)
self.threshold = 0.8
@raises(TypeError)
def test_dice_join_invalid_ltable(self):
dice_join([], self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(TypeError)
def test_dice_join_invalid_rtable(self):
dice_join(self.A, [], 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_dice_join_invalid_l_key_attr(self):
dice_join(self.A, self.B, 'A.invalid_id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_dice_join_invalid_r_key_attr(self):
dice_join(self.A, self.B, 'A.id', 'B.invalid_id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_dice_join_invalid_l_join_attr(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.invalid_attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_dice_join_invalid_r_join_attr(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.invalid_attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_dice_join_numeric_l_join_attr(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.int_attr', 'B.attr',
self.tokenizer, self.threshold)
@raises(AssertionError)
def test_dice_join_numeric_r_join_attr(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.int_attr',
self.tokenizer, self.threshold)
@raises(TypeError)
def test_dice_join_invalid_tokenizer(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
[], self.threshold)
@raises(AssertionError)
def test_dice_join_invalid_threshold_above(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, 1.5)
@raises(AssertionError)
def test_dice_join_invalid_threshold_below(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, -0.1)
@raises(AssertionError)
def test_dice_join_invalid_threshold_zero(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, 0)
@raises(AssertionError)
def test_dice_join_invalid_comp_op_lt(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '<')
@raises(AssertionError)
def test_dice_join_invalid_comp_op_le(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '<=')
@raises(AssertionError)
def test_dice_join_invalid_l_out_attr(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '>=', True, False,
['A.invalid_attr'], ['B.attr'])
@raises(AssertionError)
def test_dice_join_invalid_r_out_attr(self):
dice_join(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
self.tokenizer, self.threshold, '>=', True, False,
['A.attr'], ['B.invalid_attr'])
class OverlapCoefficientJoinInvalidTestCases(unittest.TestCase):
def setUp(self):
self.A = | pd.DataFrame([{'A.id':1, 'A.attr':'hello', 'A.int_attr':5}]) | pandas.DataFrame |
import pandas as pd
import os
from scipy import signal
import matplotlib.pyplot as plt
data1n = []
data2n = []
root = 'Filtered'
emosi = ['kaget','marah','santai','senang']
def lowpass_filter(sinyal, fcl):
    # 3rd-order Butterworth low-pass with zero-phase (forward-backward) filtering.
    sampleRate = 200  # Hz, fixed acquisition rate of the recordings
    wnl = fcl / sampleRate  # cutoff normalised by the sample rate
    b, a = signal.butter(3, wnl, 'lowpass')
    fil = signal.filtfilt(b, a, sinyal)
    return fil
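# Usage sketch (illustrative only, not from the original script): smooth a noisy
# 5 Hz sine sampled at the same 200 Hz rate the filter assumes; the 10 Hz cutoff
# is an arbitrary example value.
def demo_lowpass_filter():
    import numpy as np
    t = np.arange(0, 1, 1 / 200)
    noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(t.size)
    return lowpass_filter(noisy, 10)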
def filtering():
print("Filter dimulai, harap tunggu sebentar")
df = | pd.read_csv('Data_raw/nice1.csv') | pandas.read_csv |
#!/usr/bin/env python3
# (c) 2017-2020 L.Spiegelberg
# validates output of flights query
import pandas as pd
import os
import glob
import numpy as np
import json
import re
from tqdm import tqdm
root_path = '.'
def compare_dfs(dfA, dfB):
if len(dfA) != len(dfB):
print('not equal, lengths do not coincide {} != {}'.format(len(dfA), len(dfB)))
return False
if len(dfA.columns) != len(dfB.columns):
print('number of columns do not coincide')
return False
str_cols = list(dfA.select_dtypes([object]).columns)
numeric_cols = list(dfA.select_dtypes([bool, int, float]).columns)
# print(numeric_cols)
# print(str_cols)
if len(str_cols) + len(numeric_cols) != len(dfA.columns):
print('column separation wrong')
return False
# go over each single row (will take a lot of time)
for i in tqdm(range(len(dfA))):
rowA = dfA.iloc[i].copy()
rowB = dfB.iloc[i].copy()
num_valsA = rowA[numeric_cols].astype(np.float64)
num_valsB = rowB[numeric_cols].astype(np.float64)
if str(rowA[str_cols].values) != str(rowB[str_cols].values):
print('{} != {}'.format(str(rowA[str_cols].values), str(rowB[str_cols].values)))
print(i)
return False
if not np.allclose(num_valsA, num_valsB, rtol=1e-3, atol=1e-3, equal_nan=True):
print('{} != {}'.format(num_valsA, num_valsB))
print(i)
return False
return True
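# Usage sketch (not part of the original validation flow): compare two tiny frames;
# the column names and values below are made up purely for illustration.
def demo_compare_dfs():
    a = pd.DataFrame({'carrier': ['AA', 'DL'], 'delay': [1.0, 2.0]})
    b = pd.DataFrame({'carrier': ['AA', 'DL'], 'delay': [1.0, 2.0001]})
    return compare_dfs(a, b)  # True: strings match exactly, numbers agree within tolerance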
def main():
spark_folder = 'pyspark_output'
dask_folder = 'dask_output'
root_path = '.'
paths = os.listdir(root_path)
paths_to_verify = []
spark_paths = []
dask_paths = []
if spark_folder in paths:
spark_paths = glob.glob(os.path.join(root_path, spark_folder, '*.csv'))
paths_to_verify += spark_paths
if dask_folder in paths:
dask_paths = glob.glob(os.path.join(root_path, dask_folder, '*part*'))
dask_paths = sorted(dask_paths, key=lambda p: int(re.sub('[^0-9]', '', os.path.basename(p))))
paths_to_verify += dask_paths
print('>>> loading dask files ({} found)'.format(len(dask_paths)))
df_dask = pd.DataFrame()
for path in dask_paths:
df_dask = pd.concat((df_dask, pd.read_csv(path, low_memory=False)))
print('>>> loading spark files ({} found)'.format(len(spark_paths)))
df_spark = | pd.DataFrame() | pandas.DataFrame |
"""Loading example datasets."""
from os.path import dirname, join
import datetime
import io
import requests
import numpy as np
import pandas as pd
import time
def load_daily(long: bool = True):
"""2020 Covid, Air Pollution, and Economic Data.
Sources: Covid Tracking Project, EPA, and FRED
Args:
long (bool): if True, return data in long format. Otherwise return wide
"""
module_path = dirname(__file__)
data_file_name = join(module_path, 'data', 'covid_daily.zip')
df_wide = pd.read_csv(data_file_name, index_col=0, parse_dates=True)
if not long:
return df_wide
else:
df_long = df_wide.reset_index(drop=False).melt(
id_vars=['datetime'], var_name='series_id', value_name='value'
)
return df_long
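# Usage sketch: both output shapes of load_daily, assuming the bundled
# covid_daily.zip is present in the package's data directory.
def demo_load_daily():
    wide = load_daily(long=False)  # one column per series, DatetimeIndex rows
    tall = load_daily(long=True)   # columns: datetime, series_id, value
    return wide.shape, tall.shape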
def load_fred_monthly():
"""
Federal Reserve of St. Louis.
from autots.datasets.fred import get_fred_data
SeriesNameDict = {'GS10':'10-Year Treasury Constant Maturity Rate',
'MCOILWTICO':'Crude Oil West Texas Intermediate Cushing Oklahoma',
'CSUSHPISA': ' U.S. National Home Price Index',
'EXUSEU': 'US Euro Foreign Exchange Rate',
'EXCHUS': 'China US Foreign Exchange Rate',
'EXCAUS' : 'Canadian to US Dollar Exchange Rate Daily',
'EMVOVERALLEMV': 'Equity Market Volatility Tracker Overall', # this is a more irregular series
'T10YIEM' : '10 Year Breakeven Inflation Rate',
'USEPUINDXM': 'Economic Policy Uncertainty Index for United States' # also very irregular
}
monthly_data = get_fred_data(fredkey = 'XXXXXXXXX', SeriesNameDict = SeriesNameDict)
"""
module_path = dirname(__file__)
data_file_name = join(module_path, 'data', 'fred_monthly.zip')
df_long = pd.read_csv(data_file_name, compression='zip')
df_long['datetime'] = pd.to_datetime(
df_long['datetime'], infer_datetime_format=True
)
return df_long
def load_monthly(long: bool = True):
"""Federal Reserve of St. Louis monthly economic indicators."""
if long:
return load_fred_monthly()
else:
from autots.tools.shaping import long_to_wide
df_long = load_fred_monthly()
df_wide = long_to_wide(
df_long,
date_col='datetime',
value_col='value',
id_col='series_id',
aggfunc='first',
)
return df_wide
def load_fred_yearly():
"""
Federal Reserve of St. Louis.
from autots.datasets.fred import get_fred_data
SSeriesNameDict = {'GDPA':"Gross Domestic Product",
'ACOILWTICO':'Crude Oil West Texas Intermediate Cushing Oklahoma',
'AEXUSEU': 'US Euro Foreign Exchange Rate',
'AEXCHUS': 'China US Foreign Exchange Rate',
'AEXCAUS' : 'Canadian to US Dollar Exchange Rate Daily',
'MEHOINUSA672N': 'Real Median US Household Income',
'CPALTT01USA661S': 'Consumer Price Index All Items',
'FYFSD': 'Federal Surplus or Deficit',
'DDDM01USA156NWDB': 'Stock Market Capitalization to US GDP',
'LEU0252881600A': 'Median Weekly Earnings for Salary Workers',
'LFWA64TTUSA647N': 'US Working Age Population',
'IRLTLT01USA156N' : 'Long Term Government Bond Yields'
}
monthly_data = get_fred_data(fredkey = 'XXXXXXXXX', SeriesNameDict = SeriesNameDict)
"""
module_path = dirname(__file__)
data_file_name = join(module_path, 'data', 'fred_yearly.zip')
df_long = pd.read_csv(data_file_name)
df_long['datetime'] = pd.to_datetime(
df_long['datetime'], infer_datetime_format=True
)
return df_long
def load_yearly(long: bool = True):
"""Federal Reserve of St. Louis annual economic indicators."""
if long:
return load_fred_yearly()
else:
from autots.tools.shaping import long_to_wide
df_long = load_fred_yearly()
df_wide = long_to_wide(
df_long,
date_col='datetime',
value_col='value',
id_col='series_id',
aggfunc='first',
)
return df_wide
def load_traffic_hourly(long: bool = True):
"""
From the MN DOT via the UCI data repository.
Yes, Minnesota is the best state of the Union.
"""
module_path = dirname(__file__)
data_file_name = join(module_path, 'data', 'traffic_hourly.zip')
df_wide = pd.read_csv(
data_file_name, index_col=0, parse_dates=True, compression='zip'
)
if not long:
return df_wide
else:
df_long = df_wide.reset_index(drop=False).melt(
id_vars=['datetime'], var_name='series_id', value_name='value'
)
return df_long
def load_hourly(long: bool = True):
"""Traffic data from the MN DOT via the UCI data repository."""
return load_traffic_hourly(long=long)
def load_eia_weekly():
"""Weekly petroleum industry data from the EIA."""
module_path = dirname(__file__)
data_file_name = join(module_path, 'data', 'eia_weekly.zip')
df_long = pd.read_csv(data_file_name, compression='zip')
df_long['datetime'] = pd.to_datetime(
df_long['datetime'], infer_datetime_format=True
)
return df_long
def load_weekly(long: bool = True):
"""Weekly petroleum industry data from the EIA."""
if long:
return load_eia_weekly()
else:
from autots.tools.shaping import long_to_wide
df_long = load_eia_weekly()
df_wide = long_to_wide(
df_long,
date_col='datetime',
value_col='value',
id_col='series_id',
aggfunc='first',
)
return df_wide
def load_weekdays(long: bool = False, categorical: bool = True, periods: int = 180):
"""Test edge cases by creating a Series with values as day of week.
Args:
long (bool):
if True, return a df with columns "value" and "datetime"
if False, return a Series with dt index
categorical (bool): if True, return str/object, else return int
periods (int): number of periods, ie length of data to generate
"""
idx = pd.date_range(end=pd.Timestamp.today(), periods=periods, freq="D")
df_wide = pd.Series(idx.weekday, index=idx, name="value")
df_wide.index.name = "datetime"
if categorical:
df_wide = df_wide.replace(
{
0: "Mon",
1: "Tues",
2: "Wed",
3: "Thor's",
4: "Fri",
5: "Sat",
6: "Sun",
7: "Mon",
}
)
if long:
return df_wide.reset_index()
else:
return df_wide
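# Usage sketch: two weeks of day-of-week values, once as a categorical Series and
# once as an integer long-format frame.
def demo_load_weekdays():
    ser = load_weekdays(long=False, categorical=True, periods=14)
    df = load_weekdays(long=True, categorical=False, periods=14)
    return ser.head(), df.head()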
def load_live_daily(
long: bool = False,
fred_key: str = None,
fred_series: list = ["DGS10", "T5YIE", "SP500", "DCOILWTICO", "DEXUSEU"],
tickers: list = ["MSFT"],
trends_list: list = ["forecasting", "cycling", "cpu", "microsoft"],
weather_data_types: list = ["AWND", "WSF2", "TAVG"],
weather_stations: list = ["USW00094846", "USW00014925"],
weather_years: int = 10,
london_air_stations: list = ['CT3', 'SK8'],
london_air_species: str = "PM25",
london_air_days: int = 180,
earthquake_days: int = 180,
earthquake_min_magnitude: int = 5,
):
"""Generates a dataframe of data up to the present day.
Args:
long (bool): whether to return in long format or wide
fred_key (str): https://fred.stlouisfed.org/docs/api/api_key.html
fred_series (list): list of FRED series IDs. This requires fredapi package
tickers (list): list of stock tickers, requires yfinance
trends_list (list): list of search keywords, requires pytrends. None to skip.
weather_data_types (list): from NCEI NOAA api data types, GHCN Daily Weather Elements
PRCP, SNOW, TMAX, TMIN, TAVG, AWND, WSF1, WSF2, WSF5, WSFG
weather_stations (list): from NCEI NOAA api station ids. Pass empty list to skip.
london_air_stations (list): londonair.org.uk source station IDs. Pass empty list to skip.
        london_air_species (str): which measurement to pull from London Air. Not all stations report all metrics.
earthquake_min_magnitude (int): smallest earthquake magnitude to pull from earthquake.usgs.gov. Set None to skip this.
"""
dataset_lists = []
current_date = datetime.datetime.utcnow()
try:
if fred_key is not None:
from fredapi import Fred # noqa
from autots.datasets.fred import get_fred_data
fred_df = get_fred_data(fred_key, fred_series, long=False)
fred_df.index = fred_df.index.tz_localize(None)
dataset_lists.append(fred_df)
except ModuleNotFoundError:
print("pip install fredapi (and you'll also need an api key)")
except Exception as e:
print(f"FRED data failed: {repr(e)}")
for ticker in tickers:
try:
import yfinance as yf
msft = yf.Ticker(ticker)
# get historical market data
msft_hist = msft.history(period="max")
msft_hist = msft_hist.rename(columns=lambda x: x.lower().replace(" ", "_"))
msft_hist = msft_hist.rename(columns=lambda x: ticker.lower() + "_" + x)
try:
msft_hist.index = msft_hist.index.tz_localize(None)
except Exception:
pass
dataset_lists.append(msft_hist)
time.sleep(1)
except ModuleNotFoundError:
print("You need to: pip install yfinance")
except Exception as e:
print(f"yfinance data failed: {repr(e)}")
str_end_time = current_date.strftime("%Y-%m-%d")
start_date = (current_date - datetime.timedelta(days=360 * weather_years)).strftime(
"%Y-%m-%d"
)
for wstation in weather_stations:
try:
wbase = "https://www.ncei.noaa.gov/access/services/data/v1/?dataset=daily-summaries"
wargs = f"&dataTypes={','.join(weather_data_types)}&stations={wstation}"
wargs = (
wargs
+ f"&startDate={start_date}&endDate={str_end_time}&boundingBox=90,-180,-90,180&units=standard&format=csv"
)
wdf = pd.read_csv(wbase + wargs)
wdf['DATE'] = pd.to_datetime(wdf['DATE'], infer_datetime_format=True)
wdf = wdf.set_index('DATE').drop(columns=['STATION'])
wdf.rename(columns=lambda x: wstation + "_" + x, inplace=True)
dataset_lists.append(wdf)
time.sleep(1)
except Exception as e:
print(f"weather data failed: {repr(e)}")
str_end_time = current_date.strftime("%d-%b-%Y")
start_date = (current_date - datetime.timedelta(days=london_air_days)).strftime(
"%d-%b-%Y"
)
for asite in london_air_stations:
try:
# abase = "http://api.erg.ic.ac.uk/AirQuality/Data/Site/Wide/"
# aargs = "SiteCode=CT8/StartDate=2021-07-01/EndDate=2021-07-30/csv"
abase = 'https://www.londonair.org.uk/london/asp/downloadsite.asp'
aargs = f"?site={asite}&species1={london_air_species}m&species2=&species3=&species4=&species5=&species6=&start={start_date}&end={str_end_time}&res=6&period=daily&units=ugm3"
s = requests.get(abase + aargs).content
adf = pd.read_csv(io.StringIO(s.decode('utf-8')))
acol = adf['Site'].iloc[0] + "_" + adf['Species'].iloc[0]
adf['Datetime'] = pd.to_datetime(adf['ReadingDateTime'], dayfirst=True)
adf[acol] = adf['Value']
dataset_lists.append(adf[['Datetime', acol]].set_index("Datetime"))
time.sleep(1)
# "/Data/Traffic/Site/SiteCode={SiteCode}/StartDate={StartDate}/EndDate={EndDate}/Json"
except Exception as e:
print(f"London Air data failed: {repr(e)}")
if earthquake_min_magnitude is not None:
try:
str_end_time = current_date.strftime("%Y-%m-%d")
start_date = (
current_date - datetime.timedelta(days=earthquake_days)
).strftime("%Y-%m-%d")
# is limited to ~1000 rows of data, ie individual earthquakes
ebase = "https://earthquake.usgs.gov/fdsnws/event/1/query?"
eargs = f"format=csv&starttime={start_date}&endtime={str_end_time}&minmagnitude={earthquake_min_magnitude}"
eq = pd.read_csv(ebase + eargs)
eq["time"] = pd.to_datetime(eq["time"], infer_datetime_format=True)
eq["time"] = eq["time"].dt.tz_localize(None)
eq.set_index("time", inplace=True)
global_earthquakes = eq.resample("1D").agg(
{"mag": "mean", "depth": "count"}
)
global_earthquakes["mag"] = global_earthquakes["mag"].fillna(
earthquake_min_magnitude
)
global_earthquakes = global_earthquakes.rename(
columns={
"mag": "largest_magnitude_earthquake",
"depth": "count_large_earthquakes",
}
)
dataset_lists.append(global_earthquakes)
except Exception as e:
print(f"earthquake data failed: {repr(e)}")
if trends_list is not None:
try:
from pytrends.request import TrendReq
pytrends = TrendReq(hl="en-US", tz=360)
# pytrends.build_payload(kw_list, cat=0, timeframe='today 5-y', geo='', gprop='')
pytrends.build_payload(trends_list, timeframe="all")
gtrends = pytrends.interest_over_time()
gtrends.index = gtrends.index.tz_localize(None)
gtrends.drop(columns="isPartial", inplace=True, errors="ignore")
dataset_lists.append(gtrends)
except ImportError:
print("You need to: pip install pytrends")
except Exception as e:
print(f"pytrends data failed: {repr(e)}")
if len(dataset_lists) < 1:
raise ValueError("No data successfully downloaded!")
elif len(dataset_lists) == 1:
df = dataset_lists[0]
else:
from functools import reduce
df = reduce(
lambda x, y: pd.merge(x, y, left_index=True, right_index=True, how="outer"),
dataset_lists,
)
print(f"{df.shape[1]} series downloaded.")
if not long:
return df
else:
        df.index.name = "datetime"
        df_long = df.reset_index(drop=False).melt(
            id_vars=['datetime'], var_name='series_id', value_name='value'
        )
return df_long
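# Usage sketch (requires internet access): pull a small live dataset with most
# sources disabled; the FRED key is omitted, so FRED series are skipped.
def demo_load_live_daily():
    df = load_live_daily(
        long=False,
        fred_key=None,           # skip FRED
        tickers=["MSFT"],        # one ticker via yfinance
        trends_list=None,        # skip Google Trends
        weather_stations=[],     # skip NOAA weather
        london_air_stations=[],  # skip London Air
        earthquake_min_magnitude=5,
    )
    return df.tail()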
def load_zeroes(long=False, shape=None, start_date: str = "2021-01-01"):
"""Create a dataset of just zeroes for testing edge case."""
if shape is None:
shape = (200, 5)
df_wide = pd.DataFrame(
np.zeros(shape), index=pd.date_range(start_date, periods=shape[0], freq="D")
)
if not long:
return df_wide
else:
df_wide.index.name = "datetime"
df_long = df_wide.reset_index(drop=False).melt(
id_vars=['datetime'], var_name='series_id', value_name='value'
)
return df_long
def load_linear(
long=False,
shape=None,
start_date: str = "2021-01-01",
introduce_nan: float = None,
introduce_random: float = None,
random_seed: int = 123,
):
"""Create a dataset of just zeroes for testing edge case.
Args:
long (bool): whether to make long or wide
shape (tuple): shape of output dataframe
start_date (str): first date of index
introduce_nan (float): percent of rows to make null. 0.2 = 20%
introduce_random (float): shape of gamma distribution
random_seed (int): seed for random
"""
if shape is None:
shape = (500, 5)
idx = pd.date_range(start_date, periods=shape[0], freq="D")
df_wide = pd.DataFrame(np.ones(shape), index=idx)
df_wide = (df_wide * list(range(0, shape[1]))).cumsum()
if introduce_nan is not None:
df_wide = df_wide.sample(
frac=(1 - introduce_nan), random_state=random_seed
).reindex(idx)
if introduce_random is not None:
df_wide = df_wide + np.random.default_rng(random_seed).gamma(
introduce_random, size=shape
)
if not long:
return df_wide
else:
df_wide.index.name = "datetime"
df_long = df_wide.reset_index(drop=False).melt(
id_vars=['datetime'], var_name='series_id', value_name='value'
)
return df_long
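# Usage sketch: five linearly trending series with roughly 10% of rows set to NaN.
def demo_load_linear():
    df = load_linear(shape=(100, 5), introduce_nan=0.1, random_seed=123)
    return df.isnull().sum()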
def load_sine(long=False, shape=None, start_date: str = "2021-01-01"):
"""Create a dataset of just zeroes for testing edge case."""
if shape is None:
shape = (500, 5)
df_wide = pd.DataFrame(
np.ones(shape),
index=pd.date_range(start_date, periods=shape[0], freq="D"),
columns=range(shape[1]),
)
X = | pd.to_numeric(df_wide.index, errors='coerce', downcast='integer') | pandas.to_numeric |
import argparse
import warnings
from io import StringIO
import joblib
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from xgboost import XGBRegressor
import volcengine_ml_platform
from volcengine_ml_platform import constant
from volcengine_ml_platform.io import tos
from volcengine_ml_platform.util import cache_dir
warnings.filterwarnings(action="ignore", category=UserWarning)
volcengine_ml_platform.init()
client = tos.TOSClient()
BUCKET = constant.get_public_examples_readonly_bucket()
CACHE_DIR = cache_dir.create("price_prediction/xgboost")
zero_list = [
"MasVnrArea",
"GarageCars",
"GarageArea",
"BsmtHalfBath",
"BsmtFullBath",
]
na_list = [
"PoolQC",
"MiscFeature",
"Alley",
"Fence",
"GarageFinish",
"GarageQual",
"GarageCond",
"GarageType",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"FireplaceQu",
]
na_values = [
"Electrical",
"Functional",
"Utilities",
"Exterior2nd",
"Exterior1st",
"KitchenQual",
"SaleType",
"MSZoning",
"MasVnrType",
"BsmtHalfBath",
"BsmtFullBath",
"TotalBsmtSF",
"BsmtUnfSF",
"BsmtFinSF2",
"BsmtFinSF1",
]
csv_string_train = (
client.get_object(bucket=BUCKET, key="house-price-prediction/dataset/train.csv")
.read()
.decode("utf-8")
)
train = pd.read_csv(StringIO(csv_string_train))
csv_string_test = (
client.get_object(bucket=BUCKET, key="house-price-prediction/dataset/test.csv")
.read()
.decode("utf-8")
)
test = pd.read_csv(StringIO(csv_string_test))
test_ids = test.Id
train.drop(["Id"], axis=1, inplace=True)
train_tmp = train.drop(["SalePrice"], axis=1)
test = test.drop(["Id"], axis=1)
total = pd.concat([train_tmp, test]).reset_index(drop=True)
def fill_zero_values(zero_list):
global train, test, test_ids, total
for elem in zero_list:
total[elem] = total[elem].fillna(0)
train[elem] = train[elem].fillna(0)
test[elem] = test[elem].fillna(0)
def fill_na_values(na_list):
global train, test, test_ids, total
for elem in na_list:
total[elem] = total[elem].fillna("NA")
train[elem] = train[elem].fillna("NA")
test[elem] = test[elem].fillna("NA")
def replace_with_mode(na_values):
global train, test, test_ids, total
for elem in na_values:
total[elem] = total[elem].fillna(total[elem].mode()[0])
train[elem] = train[elem].fillna(train[elem].mode()[0])
test[elem] = test[elem].fillna(test[elem].mode()[0])
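# Usage sketch: the three imputation passes above are intended to run in this
# order on the module-level train/test/total frames loaded earlier.
def run_basic_imputation():
    fill_zero_values(zero_list)    # numeric columns where NaN really means 0
    fill_na_values(na_list)        # categorical columns where NaN means "not present"
    replace_with_mode(na_values)   # remaining columns: fill with the most frequent value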
def replace_with_linear_regression():
# execute a linear regression to replace the null values
global train, test, test_ids, total
total_lot = total[
( | pd.notna(total.LotFrontage) | pandas.notna |
from shapely import geometry
import numpy as np
import xarray as xr
import pandas as pd
import geopandas as gpd
from .base_class_for_query_of_nearest_points import Query_Nearest_Points
def _get_nearest_pixels(ground_pixel_tree,
xy,
radius=100 # in meters
):
""" Query the kd-tree for all point within distance
radius of point(s) x
Keyword arguments:
point -- a (lat, lon) tuple or array of tuples
    radius -- the search radius, in the units of the projected target CRS (metres for EPSG:4978)
"""
if isinstance(xy, geometry.Point):
point = (xy.y, xy.x)
elif len(xy) == 2:
point = xy
else:
raise AttributeError('The geometry must have both lon and latitude (geographic crs), \
or x and y (planar crs)')
rome_index = ground_pixel_tree.query_ball_point(point, radius)
return ground_pixel_tree.dataset[rome_index]
def get_nearest_pixels(da_array,
xy,
radius=1,
lat_coord_name='lat',
lon_coord_name='lon',
da_array_crs_epsg=4326,
target_epsg=4978):
ground_pixel_tree = Query_Nearest_Points(da_array,
lat_coord_name=lat_coord_name,
lon_coord_name=lon_coord_name,
da_array_crs_epsg=da_array_crs_epsg,
target_epsg=target_epsg)
    return _get_nearest_pixels(ground_pixel_tree, xy, radius=radius)
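# Usage sketch (hypothetical 1-degree grid; assumes Query_Nearest_Points accepts a
# 2-D DataArray with "lat"/"lon" coordinates, as the defaults above suggest).
def demo_get_nearest_pixels():
    lats = np.arange(-5.0, 5.0, 1.0)
    lons = np.arange(30.0, 40.0, 1.0)
    da = xr.DataArray(
        np.random.rand(lats.size, lons.size),
        coords={"lat": lats, "lon": lons},
        dims=("lat", "lon"),
    )
    point = geometry.Point(35.0, 0.0)  # shapely Point(x=lon, y=lat)
    return get_nearest_pixels(da, point, radius=1)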
def reduce_df(da, reductions = {'mean', 'max', 'min', 'std', 'count'},
variable_columns=[]):
df = da.to_dataframe()
df_reduced = df.agg(reductions)
df_reduced = df_reduced.loc[: , variable_columns]
return df_reduced
def query_nearest_points(ground_pixel_tree,
xy,
k,
da_time_coord_name = 'time',
gdf_time_coord_name='datetime',
dict_of_windows = {'time_window':3,
'time_unit':'D'}
):
time_delta = pd.Timedelta(dict_of_windows['time_window'],
dict_of_windows['time_unit'])
time_init = pd.to_datetime(xy[gdf_time_coord_name]) - time_delta
time_end = | pd.to_datetime(xy[gdf_time_coord_name]) | pandas.to_datetime |
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
import numpy as np
from scripts.my_normalize_data import (
normalize_expedition_section_cols,
remove_bracket_text,
remove_whitespace,
normalize_columns
)
class XTestNormalizeColumns:
def test_replace_column_name_with_value_from_columns_mapping(self):
columns_mapping = {"aa": "A"}
data = {"aa": [1]}
df = pd.DataFrame(data)
data = {"A": [1]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_replace_multiple_column_name_with_value_from_columns_mapping(self):
columns_mapping = {"aa": "A", "b b": "B"}
data = {"aa": [1], "b b": [2]}
df = pd.DataFrame(data)
data = {"A": [1], "B": [2]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_does_not_affect_columns_not_in_columns_mapping(self):
columns_mapping = {"aa": "A", "b b": "B"}
data = {"aa": [1], "b b": [2], "cc": [3]}
df = pd.DataFrame(data)
data = {"A": [1], "B": [2], "cc": [3]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_does_not_affect_columns_if_columns_mapping_has_no_value(self):
columns_mapping = {"aa": None, "bb": "", "cc": np.nan}
data = {"aa": [1], "b b": [2], "cc": [3]}
df = | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, time
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas import (DataFrame, Series, Index,
Timestamp, DatetimeIndex,
to_datetime, date_range)
import pandas as pd
import pandas.tseries.offsets as offsets
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.compat import product
from pandas.tests.frame.common import TestData
class TestDataFrameTimeSeriesMethods(tm.TestCase, TestData):
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
self.assertEqual(rs.s[1], 1)
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
# issue 10907
df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
df.insert(0, 'x', 1)
result = df.diff(axis=1)
expected = pd.DataFrame({'x': np.nan, 'y': pd.Series(
1), 'z': pd.Series(1)}).astype('float64')
assert_frame_equal(result, expected)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0, 2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[pd.Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
self.assertEqual(result[0].dtype, np.float64)
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH 9727
df = DataFrame([[1., 2.], [3., 4.]])
assert_frame_equal(df.diff(axis=1), DataFrame(
[[np.nan, 1.], [np.nan, 1.]]))
assert_frame_equal(df.diff(axis=0), DataFrame(
[[np.nan, np.nan], [2., 2.]]))
def test_pct_change(self):
rs = self.tsframe.pct_change(fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
rs = self.tsframe.pct_change(2)
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
filled = self.tsframe.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O')).values
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O')).values
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_shift(self):
# naive shift
shiftedFrame = self.tsframe.shift(5)
self.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
shiftedFrame = self.tsframe.shift(-5)
self.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
unshifted = self.tsframe.shift(0)
assert_frame_equal(unshifted, self.tsframe)
# shift by DateOffset
shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay())
self.assertEqual(len(shiftedFrame), len(self.tsframe))
shiftedFrame2 = self.tsframe.shift(5, freq='B')
assert_frame_equal(shiftedFrame, shiftedFrame2)
d = self.tsframe.index[0]
shifted_d = d + offsets.BDay(5)
assert_series_equal(self.tsframe.xs(d),
shiftedFrame.xs(shifted_d), check_names=False)
# shift int frame
int_shifted = self.intframe.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
self.assert_index_equal(shifted.index, ps.index)
self.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.iloc[:, 0].valid().values,
ps.iloc[:-1, 0].values)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, offsets.BDay())
assert_frame_equal(shifted2, shifted3)
assert_frame_equal(ps, shifted2.shift(-1, 'B'))
assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',
ps.shift, freq='D')
# shift other axis
# GH 6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis=1)
assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis='columns')
assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH 9416
s1 = pd.Series(['a', 'b', 'c'], dtype='category')
s2 = pd.Series(['A', 'B', 'C'], dtype='category')
df = DataFrame({'one': s1, 'two': s2})
rs = df.shift(1)
xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
assert_frame_equal(rs, xp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
assert_frame_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
shifted = self.tsframe.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(self.tsframe, unshifted)
shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(self.tsframe.values,
Index(np.asarray(self.tsframe.index)),
columns=self.tsframe.columns)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(shifted, self.tsframe.tshift(1))
assert_frame_equal(unshifted, inferred_ts)
no_freq = self.tsframe.iloc[[0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_truncate(self):
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
self.assertRaises(ValueError, ts.truncate,
before=ts.index[-1] - 1,
after=ts.index[0] + 1)
def test_truncate_copy(self):
index = self.tsframe.index
truncated = self.tsframe.truncate(index[5], index[10])
truncated.values[:] = 5.
self.assertFalse((self.tsframe.values[5:11] == 5).any())
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd())
rule_monthly = self.tsframe.asfreq('BM')
assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('B', method='pad') # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq('B', method='pad') # noqa
# test does not blow up on length-0 DataFrame
zero_length = self.tsframe.reindex([])
result = zero_length.asfreq('BM')
self.assertIsNot(result, zero_length)
def test_asfreq_datetimeindex(self):
df = DataFrame({'A': [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
tm.assertIsInstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
tm.assertIsInstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range('1/1/2016', periods=10, freq='2S')
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({'one': ts})
# insert pre-existing missing value
df.loc['2016-01-01 00:00:08', 'one'] = None
actual_df = df.asfreq(freq='1S', fill_value=9.0)
expected_df = df.asfreq(freq='1S').fillna(9.0)
expected_df.loc['2016-01-01 00:00:08', 'one'] = None
assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq='1S').fillna(9.0)
actual_series = ts.asfreq(freq='1S', fill_value=9.0)
assert_series_equal(expected_series, actual_series)
def test_first_last_valid(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
mat[-5:] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
index = frame.first_valid_index()
self.assertEqual(index, frame.index[5])
index = frame.last_valid_index()
self.assertEqual(index, frame.index[-6])
# GH12800
empty = DataFrame()
self.assertIsNone(empty.last_valid_index())
self.assertIsNone(empty.first_valid_index())
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT,
| pd.Timestamp('2012-05-01') | pandas.Timestamp |
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2022/5/2 15:58
Desc: Eastmoney - stock - financial analysis
"""
import pandas as pd
import requests
from tqdm import tqdm
def stock_balance_sheet_by_report_em(symbol: str = "SH600519") -> pd.DataFrame:
"""
    Eastmoney - stock - financial analysis - balance sheet, by reporting period
    https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
    :param symbol: stock code with market prefix, e.g. "SH600519"
    :type symbol: str
    :return: balance sheet by reporting period
    :rtype: pandas.DataFrame
"""
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbDateAjaxNew"
params = {
"companyType": "4",
"reportDateType": "0",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
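    # the detail endpoint appears to accept only a few report dates per request, so query them in batches of five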
sep_list = [",".join(need_date[i: i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbAjaxNew"
params = {
"companyType": "4",
"reportDateType": "0",
"reportType": "1",
"dates": item,
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
def stock_balance_sheet_by_yearly_em(symbol: str = "SH600519") -> pd.DataFrame:
"""
东方财富-股票-财务分析-资产负债表-按年度
https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
:param symbol: 股票代码; 带市场标识
:type symbol: str
:return: 资产负债表-按年度
:rtype: pandas.DataFrame
"""
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbDateAjaxNew"
params = {
"companyType": "4",
"reportDateType": "1",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
temp_df["REPORT_DATE"] = pd.to_datetime(temp_df["REPORT_DATE"]).dt.date
temp_df["REPORT_DATE"] = temp_df["REPORT_DATE"].astype(str)
need_date = temp_df["REPORT_DATE"].tolist()
sep_list = [",".join(need_date[i: i + 5]) for i in range(0, len(need_date), 5)]
big_df = pd.DataFrame()
for item in tqdm(sep_list, leave=False):
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/zcfzbAjaxNew"
params = {
"companyType": "4",
"reportDateType": "1",
"reportType": "1",
"dates": item,
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
return big_df
def stock_profit_sheet_by_report_em(symbol: str = "SH600519") -> pd.DataFrame:
"""
东方财富-股票-财务分析-利润表-报告期
https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/Index?type=web&code=sh600519#lrb-0
:param symbol: 股票代码; 带市场标识
:type symbol: str
:return: 利润表-报告期
:rtype: pandas.DataFrame
"""
url = "https://emweb.securities.eastmoney.com/PC_HSF10/NewFinanceAnalysis/lrbDateAjaxNew"
params = {
"companyType": "4",
"reportDateType": "0",
"code": symbol,
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = | pd.DataFrame(data_json["data"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author:XuMing(<EMAIL>)
@description:
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from .discretize import discretize
from .feature import ContinuousFeature, CategoricalFeature, MultiCategoryFeature
from ..utils.logger import logger
class FeatureDict:
def __init__(self):
self.continuous_feats = {}
self.categorical_feats = {}
self.multi_category_feats = {}
self.feat_dict = {}
def add_continuous_feat(self, name, transformation=None, discretize=None, discretize_bin=10):
self.delete_feat(name)
self.continuous_feats[name] = ContinuousFeature(name, transformation, discretize, discretize_bin)
self.feat_dict[name] = 'continuous'
def add_categorical_feat(self, name, all_categories=None):
self.delete_feat(name)
self.categorical_feats[name] = CategoricalFeature(name, all_categories)
self.feat_dict[name] = 'categorical'
def add_multi_category_feat(self, name, all_categories=None):
self.delete_feat(name)
self.multi_category_feats[name] = MultiCategoryFeature(name, all_categories)
self.feat_dict[name] = 'multi_category'
def delete_feat(self, name):
if name in self.feat_dict:
feat_type = self.feat_dict[name]
if feat_type == 'continuous':
del self.continuous_feats[name]
elif feat_type == 'categorical':
del self.categorical_feats[name]
elif feat_type == 'multi_category':
del self.multi_category_feats[name]
def feature_size(self):
total_size = 0
total_size += len(self.continuous_feats)
for key in self.categorical_feats:
feat = self.categorical_feats[key]
total_size += feat.dim
for key in self.multi_category_feats:
feat = self.multi_category_feats[key]
total_size += feat.dim
return total_size
def field_size(self):
"""
Num of features keys
:return: int
"""
return len(self.feat_dict)
def field_range(self):
fields = []
for k,v in self.continuous_feats.items():
fields.append(v.dim)
for k,v in self.categorical_feats.items():
fields.append(v.dim)
for k,v in self.multi_category_feats.items():
fields.append(v.dim)
return fields
def __repr__(self):
feats_list = [self.continuous_feats, self.categorical_feats, self.multi_category_feats]
info_strs = []
for feats in feats_list:
info_str = ''
for key in feats:
feat = feats[key]
info_str += str(feat)
info_str += '\n'
info_strs.append(info_str)
return 'Continuous Features:\n{}Categorical Features:\n{}Multi-Category Features:\n{}'.format(*info_strs)
def process_features(features: FeatureDict, data: pd.DataFrame):
r"""Transform raw data into index and value form.
Continuous features will be discretized, standardized, normalized or scaled according to feature meta.
Categorical features will be encoded with a label encoder.
    :param features: The FeatureDict instance that describes the raw data.
    :param data: The raw data to be transformed.
:return: feat_index, feat_value, category_index, continuous_value (DataFrame)
"""
logger.info('process_features start')
continuous_feats = features.continuous_feats
categorical_feats = features.categorical_feats
columns = list(continuous_feats.keys())
columns.extend(list(categorical_feats.keys()))
data = data[columns]
feat_idx = pd.DataFrame()
# transform continuous features
logger.info('transforming continuous features')
feat_value_continuous = pd.DataFrame()
idx = 0
for name in continuous_feats:
feat = continuous_feats[name]
feat.start_idx = idx
if feat.discretize:
# use discretize
discrete_data, intervals = discretize(data[name], feat.discretize, feat.dim)
feat.bins = intervals
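            # the bucket index selects the embedding slot, so the accompanying feature value is a constant 1 (one-hot style)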
feat_idx[name] = discrete_data + idx
feat_value_continuous[name] = pd.Series(np.ones(len(data[name])))
idx += feat.dim
else:
# standardized, normalize or MinMaxScaler
processor = feat.transformation
col_data = np.reshape(data[name].values, (-1, 1))
col_data = processor.fit_transform(col_data)
col_data = np.reshape(col_data, -1)
feat_value_continuous[name] = col_data
feat_idx[name] = np.repeat(idx, repeats=len(data))
idx += 1
logger.info('transforming categorical features')
# transform categorical features
categorical_index = pd.DataFrame()
for name in categorical_feats:
categorical_feat = categorical_feats[name]
le = LabelEncoder()
feat_idx[name] = le.fit_transform(data[name]) + idx
categorical_index[name] = feat_idx[name]
categorical_feat.processor = le
num_classes = len(le.classes_)
categorical_feat.dim = num_classes
categorical_feat.start_idx = idx
idx += num_classes
feat_idx = feat_idx.apply(lambda x: x.values, axis=1)
categorical_index = categorical_index.apply(lambda x: x.values, axis=1)
feat_value_category = pd.DataFrame(np.ones((len(data), len(categorical_feats))))
feat_value = | pd.concat([feat_value_continuous, feat_value_category], axis=1) | pandas.concat |
# encoding=utf-8
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import lightgbm as lgb
import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
import joblib
import time
start_time=time.time()
print("Starting job at time:", time.time())
debug = True
print("loading data ...")
used_cols = ["item_id", "user_id"]
if debug == False:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", parse_dates=["activation_date"])
# suppl
train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
else:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
    train_df = shuffle(train_df, random_state=1234)
train_df = train_df.iloc[:100000]
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", nrows=1000, parse_dates=["activation_date"])
# suppl
train_active = pd.read_csv("../input/train_active.csv", nrows=1000, usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", nrows=1000, usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", nrows=1000, parse_dates=["date_from", "date_to"])
test_periods = | pd.read_csv("../input/periods_test.csv", nrows=1000, parse_dates=["date_from", "date_to"]) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 13 10:31:11 2020
@author: <NAME>
"""
# Imports
import os
import pandas as pd
import itertools
import numpy as np
# import multiprocessing as mp
from operator import itemgetter
from support_modules.readers import log_reader as lr
from support_modules import role_discovery as rl
from support_modules import support as sup
class IntercasFeatureExtractor():
"""
This is the man class encharged of the Feature extraction
"""
def __init__(self, params, log, user_role):
"""constructor"""
self.log = log
self.settings = params
self.sub_group = params['sub_group']
self.expanded_log = pd.DataFrame
self.event_slides = pd.DataFrame
self.user_role = user_role
# create input folder
self.process_temp_folder(params['temp_input'])
# create output folder
self.process_temp_folder(params['temp_output'])
# Split and process eventlog
self.data_source_preparation()
# Process folds
self.process_folds()
def data_source_preparation(self):
self.log['event_id'] = self.log.index
self.log = self.calculate_event_duration(self.log)
# Split event log
self.folding_creation(self.log,
self.settings['splits'],
self.settings['temp_input'])
@staticmethod
def folding_creation(log, splits, output):
log = log.sort_values(by='end_timestamp')
idxs = [x for x in range(0, len(log),
round(len(log)/splits))]
idxs.append(len(log))
log['lead_trail'] = False
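        # lead_trail flags events copied from neighbouring folds only to complete cut traces;
        # they are filtered out again once the features have been calculated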
folds = [pd.DataFrame(log.iloc[idxs[i-1]:idxs[i]])
for i in range(1, len(idxs))]
for i in range(1, len(folds)):
fold = folds[i]
# Find initial incomplete traces
inc_traces = pd.DataFrame(fold.groupby('caseid')
.first()
.reset_index())
inc_traces = inc_traces[inc_traces.pos_trace > 0]
inc_traces = inc_traces['caseid'].to_list()
# find completion of incomplete traces
prev_fold = folds[i - 1]
times = prev_fold[(prev_fold.caseid.isin(inc_traces)) &
(prev_fold.lead_trail == False)]
del inc_traces
# Define timespan for leading events
minimum = times.end_timestamp.min()
leading = prev_fold[(prev_fold.end_timestamp >= minimum) &
(prev_fold.lead_trail == False)]
leading = leading.caseid.to_list()
leading = pd.DataFrame(prev_fold[prev_fold.caseid.isin(leading)])
minimum = leading.groupby('caseid').tail(2).reset_index()
minimum = minimum.end_timestamp.min()
leading = leading[leading.end_timestamp >= minimum]
leading['lead_trail'] = True
# Attach leading events
folds[i] = pd.concat([leading, fold], axis=0, ignore_index=True)
del leading
del fold
for i in range(0, len(folds)-1):
fold = folds[i]
# Find initial incomplete traces
inc_traces = pd.DataFrame(fold.groupby('caseid')
.last()
.reset_index())
inc_traces = inc_traces[inc_traces.pos_trace < inc_traces.trace_len]
inc_traces = inc_traces['caseid'].to_list()
# find completion of incomplete traces
next_fold = folds[i + 1]
times = next_fold[(next_fold.caseid.isin(inc_traces)) &
(next_fold.lead_trail == False)]
del inc_traces
# Define timespan for leading events
maximum = times.end_timestamp.max()
trailing = pd.DataFrame(
next_fold[(next_fold.end_timestamp <= maximum) &
(next_fold.lead_trail == False)])
trailing['lead_trail'] = True
# Attach leading events
folds[i] = pd.concat([fold, trailing], axis=0, ignore_index=True)
del trailing
del fold
# Export folds
for i, fold in enumerate(folds):
fold.to_csv(os.path.join(output,'split_'+str(i+1)+'.csv'))
def process_folds(self):
for fold in self.create_file_list(self.settings['temp_input']):
print('processing split', fold, sep=':')
log_path = os.path.join(self.settings['temp_input'], fold)
self.log = pd.read_csv(log_path, index_col='Unnamed: 0')
self.log['end_timestamp'] = pd.to_datetime(self.log['end_timestamp'],
format='%Y-%m-%d %H:%M:%S')
self.log = self.log.sort_values(by='event_id')
print('Expanding event-log')
self.expanded_log_creation()
print('Calculating features')
self.calculate_features()
print('filter leading events')
self.log = self.log[self.log.lead_trail==False]
self.log['fold'] = fold
self.log.to_csv(os.path.join(self.settings['temp_output'], fold))
# clean memory
del self.expanded_log
del self.log
# Read proceced folds
print('Processing outputs')
folds = list()
for filename in self.create_file_list(self.settings['temp_output']):
df = pd.read_csv(os.path.join(self.settings['temp_output'], filename),
index_col='Unnamed: 0')
folds.append(df)
processed_log = pd.concat(folds, axis=0, ignore_index=True)
processed_log = processed_log.sort_values(by='event_id')
# Clean folders
self.process_temp_folder(self.settings['temp_input'])
os.rmdir(self.settings['temp_input'])
self.process_temp_folder(self.settings['temp_output'])
os.rmdir(self.settings['temp_output'])
processed_log.to_csv(os.path.join(
'outputs', 'inter_'+self.settings['file_name'].split('.')[0]+'.csv'))
# =============================================================================
# Expanded log management
# =============================================================================
def expanded_log_creation(self):
# Matching events with slices
ranges = self.split_events(self.log)
ranges = self.match_slices(ranges)
ranges_slides = {k: r['events'] for k, r in ranges.items()}
ranges = | pd.DataFrame.from_dict(ranges, orient='index') | pandas.DataFrame.from_dict |
from tkinter import *
from datetime import timedelta, datetime
from urllib.request import urlopen, Request, urlretrieve
import urllib.error
from pathlib import Path
import os
import sys
import pandas as pd
import numpy as np
import requests
import csv
import io
import gspread_dataframe as gd
from lxml import html
from lxml import etree
from openpyxl import load_workbook
position_fii, position_dii = 0, 0
workbookPath = 'test.xlsx'
def dii_and_fii_data(date):
"""DIIs and FIIs Data Single Day"""
# The given url requires date to be in the format ---- ddmmyyyy
url = 'https://www.nseindia.com/content/nsccl/fao_participant_oi_' + \
date.replace('-', '') + '.csv'
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
r = requests.get(url, headers=hdr)
df = pd.read_csv(io.StringIO(r.content.decode('utf-8')))
try:
new_header = df.iloc[0] # grab the first row for the header
df = df[1:] # take the data less the header row
df.columns = new_header # set the header row as the df header
df.insert(loc=0, column="Date", value=date)
df_right_dii = df.loc[2:2, ('Future Index Long',
'Future Index Short',
'Future Stock Long',
'Future Stock Short',
'Option Index Call Long',
'Option Index Put Long',
'Option Index Call Short',
'Option Index Put Short')]
df_right_fii = df.loc[3:3, ('Future Index Long',
'Future Index Short',
'Future Stock Long',
'Future Stock Short',
'Option Index Call Long',
'Option Index Put Long',
'Option Index Call Short',
'Option Index Put Short')]
df_date = df.loc[2:2, ('Date',)]
except KeyError:
print("[+] Sorry, content for %s is not available online,\nKindly try after 7:30 PM for Today's Contents"%(date))
sys.exit(1)
return df_date, df_right_dii, df_right_fii
def availableDate(date):
"""Find next available data on site.
This removes the possibilty of holidays in the list.
Returns working day DATE as str
Sub-module: <Only for use with nextDate function>
DO NOT TOUCH"""
url = 'https://www.nseindia.com/content/nsccl/fao_participant_oi_' + date + '.csv'
r = requests.get(url)
tree = html.fromstring(r.content)
checkDate = tree.findtext('.//title')
# p Returns None if Data to be scrapped is found
# p Returns 404 Not Found if Data to be scrapped is not found
return checkDate
def findDate():
"""Returns the str of Last filled Date and next Date to be filled"""
global position_dii, position_fii
df = pd.read_excel(workbookPath)
lastFilledDate = pd.isna(df['Unnamed: 16']).index[-1]
# This gives the row index from which data can be started appending
position_fii = len(df) + 1
position_dii = len(df) + 1 - 378
nextDate = (datetime.strptime(lastFilledDate, '%d-%m-%Y') +
timedelta(days=1)).strftime('%d-%m-%Y')
while availableDate(nextDate.replace('-', '')) == '404 Not Found':
nextDate = (datetime.strptime(nextDate, '%d-%m-%Y') +
timedelta(days=1)).strftime('%d-%m-%Y')
return lastFilledDate, nextDate
def niftySpot(date):
"""Returns the nifty closing value of the day as string"""
# Requires date format to be dd-mm-yyyy
url = "https://www.nseindia.com/products/dynaContent/equities/indices/historicalindices.jsp?indexType=NIFTY%2050&fromDate=" + date + "&toDate=" + date
page = requests.get(url)
tree = html.fromstring(page.content)
try:
nifty_close = tree.xpath('/html/body/table/tr/td[5]/text()')[0].strip()
return nifty_close
except IndexError:
print("Sorry the nifty value of %s, has not been refreshed online yet. \nKindly try after 7:30 PM"%(date))
sys.exit(1)
def dataAppend():
# lastFilledDate = findDate()[0]
# now.time() > datetime.time(hour=8)
while datetime.now().strftime('%d-%m-%Y') != findDate()[0]:
if datetime.now().strftime('%d-%m-%Y') == findDate()[0]:
print("[+][+] Process Completed")
break
# lastFilledDate = findDate()[1]
# Load current date inside the variable, thus changing according to the loop of the function
date = findDate()[1]
# Load the excel file into the script
book = load_workbook(workbookPath)
writer = pd.ExcelWriter(
workbookPath, engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
# Get the value to be appended for a given date in the loop
df_date, df_right_dii, df_right_fii = dii_and_fii_data(date)
nifty_close = niftySpot(date)
# Appending Date to FIIs Data and DIIs Data
print("[+] Appending Dates of FIIs and DIIs of date %s to row %s (FII) and row %s (DII)" %
(date, position_fii, position_dii))
df_date.to_excel(writer, "FII Activity", startrow=position_fii,
index=False, header=None)
df_date.to_excel(writer, "DII", startrow=position_dii,
index=False, header=None)
# Appending FII and nifty information to FIIs Data
print("[+] Appending Data of FIIs and Nifty of date %s to row %s" %
(date, position_fii))
df_right_fii.to_excel(writer, "FII Activity", startrow=position_fii,
startcol=14, index=False, header=None)
| pd.DataFrame(data=[nifty_close]) | pandas.DataFrame |
from typing import List
import numpy as np
import pandas as pd
from trader.core.model import Position
from trader.core.const.candle_index import OPEN_TIME_INDEX
from trader.core.util.common import Storable
import plotly.graph_objects as go
class TradeReport(Storable):
def __init__(
self,
start_cash: float,
positions: List[Position],
trade_ratio: float,
candles: np.ndarray,
interval: str,
leverage: int,
):
self.candles = candles
self.start_timestamp = candles[0][OPEN_TIME_INDEX]
self.end_timestamp = candles[-1][OPEN_TIME_INDEX]
self.trade_ratio = trade_ratio
self.interval = interval
self.start_cash = start_cash
self.profits = tuple(position.profit() for position in positions)
self.end_cash = sum(self.profits) + self.start_cash
self.leverage = leverage
self.wins = len(tuple(position for position in positions if position.profit() > 0))
self.losses = len(tuple(position for position in positions if position.profit() < 0))
@property
def number_of_candles(self):
return self.candles.shape[0]
@property
def start_time(self):
return | pd.to_datetime(self.start_timestamp, unit='s') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 16:30:16 2019
@author: <NAME>
"""
### Program for controlling BK8542B DC Electronic Load for IV curve measurement of solar panel ###
import serial, time, csv, os
import pandas as pd
import itertools as it
from time import strftime
from array import array
global ser, ser_relay, resp_status_dict, mode_cc, mode_cv, mode_cw, mode_cr
global scale_curr, scale_volt, scale_watt, scale_resi
global r1, r2, r3, r4, r5, r6, r7, r8
global sample_id
sample_id = 29381
# Initialize PC-load serial communication and global variables
def init_load():
"""Docstring"""
global ser, resp_status_dict, mode_cc, mode_cv, mode_cw, mode_cr
global scale_curr, scale_volt, scale_watt, scale_resi
baudrate = 9600
port = "COM4"
ser = serial.Serial(port,baudrate, timeout=1)
resp_status_dict = {
0x90: "ERROR: Invalid checksum",
0xA0: "ERROR: Invalid value",
0xB0: "ERROR: Unable to execute",
0xC0: "ERROR: invalid command",
0x80: True,
}
mode_cc = 0 # constant current mode
mode_cv = 1 # constant voltage mode
mode_cw = 2 # constant power mode
mode_cr = 3 # constant resistance mode
scale_volt = 1000
scale_curr = 10000
scale_watt = 1000
scale_resi = 1000
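    # raw packet values are fixed-point integers; these factors convert to/from volts, amps, watts and ohms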
def close():
"""Docstring"""
ser.close()
def parse_data(resp):
"""Docstring"""
data = resp[4] | (resp[5] << 8) | (resp[6] << 16) | (resp[7] << 24)
print(data)
return data
def check_resp(resp):
"""Docstring"""
if len(resp) == 26:
# Confirm start byte
if resp[0] == 0xAA:
resp_type = resp[2]
if resp_type == 0x12: # Status type
return resp_status_dict[resp[3]]
else:
return True
else:
print('Start byte mismatch')
return None
else:
print('Packet length mismatch')
return None
def build_cmd(cmd, value=None):
"""Docstring"""
build_cmd = array('B', [0x00]*26)
build_cmd[0] = 0xAA # Packet start
build_cmd[1] = 0x00 # Unsupported address location
build_cmd[2] = cmd & 0xFF # Command value
if value is not None:
build_cmd[3] = value & 0xFF # value 1st byte little endian
build_cmd[4] = (value >> 8) & 0xFF # value 2nd byte little endian
build_cmd[5] = (value >> 16) & 0xFF # value 3rd byte little endian
build_cmd[6] = (value >> 24) & 0xFF # value 4th byte little endian
checksum = 0
for item in build_cmd:
checksum += item
checksum %= 256
build_cmd[25] = checksum & 0xFF
return build_cmd.tobytes()
def send_recv_cmd(cmd_packet):
"""Docstring"""
    # House cleaning: flush serial input and output buffers
ser.reset_output_buffer()
ser.reset_input_buffer()
# Send and receive
ser.write(cmd_packet)
time.sleep(0.250) # Provide time for response
resp_array = array('B', ser.read(26)) # get resp and put in array
check = check_resp(resp_array)
if check is True:
return resp_array
else:
print('Response check failed')
print(check)
return None
def get_device_info():
"""Docstring"""
built_packet = build_cmd(0x6A)
resp = send_recv_cmd(built_packet)
if resp is not None:
model = chr(resp[3]) + chr(resp[4]) + chr(resp[5]) + chr(resp[6])
version = str(resp[9]) + '.' + str(resp[8])
serial = chr(resp[10]) + chr(resp[11]) + chr(resp[12]) + chr(resp[13]) + chr(resp[14]) + chr(resp[16]) + chr(resp[17]) + chr(resp[18]) + chr(resp[19])
return (model, version, serial)
else:
return None
def get_input_values():
"""Docstring"""
built_packet = build_cmd(0x5F)
resp = send_recv_cmd(built_packet)
if resp is not None:
volts = (resp[3] | (resp[4] << 8) | (resp[5] << 16) | (resp[6] << 24)) / scale_volt
current = (resp[7] | (resp[8] << 8) | (resp[9] << 16) | (resp[10] << 24)) / scale_curr
power = (resp[11] | (resp[12] << 8) | (resp[13] << 16) | (resp[14] << 24)) / scale_watt
return (volts, current, power)
else:
return None
def set_function(function):
"""Docstring"""
built_packet = build_cmd(0x5D, value=function)
resp = send_recv_cmd(built_packet)
return resp
def get_function():
built_packet = build_cmd(0x5E)
resp = send_recv_cmd(built_packet)
if resp is not None:
return resp[3]
else:
return None
def set_remote_sense(is_remote=False):
"""Docstring"""
built_packet = build_cmd(0x56, value=int(is_remote))
resp = send_recv_cmd(built_packet)
return resp
def get_remote_sense():
"""Docstring"""
built_packet = build_cmd(0x57)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp)
else:
return None
def set_remote_control(is_remote=False):
"""Docstring"""
built_packet = build_cmd(0x20, value=int(is_remote))
resp = send_recv_cmd(built_packet)
if is_remote == False:
return False
else:
return True
def set_local_control(is_local=True):
"""Docstring"""
built_packet = build_cmd(0x55, value=int(is_local))
resp = send_recv_cmd(built_packet)
return resp
def set_mode(mode):
"""Docstring"""
built_packet = build_cmd(0x28, value=mode)
resp = send_recv_cmd(built_packet)
return resp
def get_mode():
"""Docstring"""
built_packet = build_cmd(0x29)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp)
else:
return None
def set_enable_load(is_enabled=False):
"""Docstring"""
built_packet = build_cmd(0x21, value=int(is_enabled))
resp = send_recv_cmd(built_packet)
return resp
def set_max_volts(max_volts=0):
"""Docstring"""
built_packet = build_cmd(0x22, value=int(max_volts))
resp = send_recv_cmd(built_packet)
return resp
def get_max_volts():
"""Docstring"""
built_packet = build_cmd(0x23)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_volt
else:
return None
def set_max_current( max_current=0):
"""Docstring"""
built_packet = build_cmd(0x24, value=int(max_current * scale_curr))
resp = send_recv_cmd(built_packet)
return resp
def get_max_current():
"""Docstring"""
built_packet = build_cmd(0x25)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_curr
else:
return None
def set_max_power( max_power=0):
"""Docstring"""
built_packet = build_cmd(0x24, value=int(max_power * scale_watt))
resp = send_recv_cmd(built_packet)
return resp
def get_max_power():
"""Docstring"""
built_packet = build_cmd(0x27)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_volt
else:
return None
def set_CV_volts(cv_volts=0):
"""Docstring"""
built_packet = build_cmd(0x2C, value=int(cv_volts * scale_volt))
resp = send_recv_cmd(built_packet)
return resp
def get_CV_volts():
"""Docstring"""
built_packet = build_cmd(0x2D)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_volt
else:
return None
def set_CC_current(cc_current=0):
"""Docstring"""
built_packet = build_cmd(0x2A, value=int(cc_current * scale_curr))
resp = send_recv_cmd(built_packet)
return resp
def get_CC_current():
"""Docstring"""
built_packet = build_cmd(0x2B)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_curr
else:
return None
def set_CP_power(cp_power=0):
    """Set the constant-power setpoint in watts."""
    built_packet = build_cmd(0x2E, value=int(cp_power * scale_watt))
    resp = send_recv_cmd(built_packet)
    return resp
def get_CP_power():
    """Read back the constant-power setpoint in watts."""
    built_packet = build_cmd(0x2F)
    resp = send_recv_cmd(built_packet)
    if resp is not None:
        return parse_data(resp) / scale_watt
    else:
        return None
def set_CR_resistance(cr_resistance=0):
"""Docstring"""
built_packet = build_cmd(0x30, value=int(cr_resistance * scale_resi))
resp = send_recv_cmd(built_packet)
return resp
def get_CR_resistance():
"""Docstring"""
built_packet = build_cmd(0x31)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_resi
else:
return None
def set_bat_volts_min(min_volts=3):
"""Docstring"""
built_packet = build_cmd(0x4E, value=int(min_volts * scale_volt))
resp = send_recv_cmd(built_packet)
return resp
def get_bat_volts_min():
"""Docstring"""
built_packet = build_cmd(0x4F)
resp = send_recv_cmd(built_packet)
if resp is not None:
return parse_data(resp) / scale_volt
else:
return None
#______________________________________________________________________________
### Velleman VM8090 relay card communication and control
def init_relay_card():
global ser_relay
port = "COM5"
baudrate = 19200
ser_relay = serial.Serial(port,baudrate, timeout=1)
def close_relay():
"""Disconnect from relay card"""
ser_relay.close()
def build_cmd_relay(cmd_relay, which_relay):
"""Construct command for relay card"""
global r1, r2, r3, r4, r5, r6, r7, r8
r1 = 0x01
r2 = 0x02
r3 = 0x04
r4 = 0x08
r5 = 0x10
r6 = 0x20
r7 = 0x40
r8 = 0x80
    build_cmd_relay = array('B', [0x00]*7)
    param1 = 0x00
    param2 = 0x00
    build_cmd_relay[0] = 0x04  # start byte (STX)
    build_cmd_relay[1] = cmd_relay & 0xFF  # command byte
    build_cmd_relay[2] = which_relay & 0xFF  # mask byte to select relay(s)
    build_cmd_relay[3] = param1  # param 1 (unused)
    build_cmd_relay[4] = param2  # param 2 (unused)
    # checksum of the byte packet, truncated to a single byte
    build_cmd_relay[5] = (-(build_cmd_relay[0] + build_cmd_relay[1] + build_cmd_relay[2]
                            + param1 + param2) + 1) & 0xFF
    build_cmd_relay[6] = 0x0F  # end byte (ETX)
return build_cmd_relay.tobytes()
def send_cmd_relay(cmd_relay_packet):
"""Send or receive command packet from relay card"""
ser_relay.reset_output_buffer()
ser_relay.reset_input_buffer()
ser_relay.write(cmd_relay_packet)
def switch_relay_on(which_relay):
"""Switch on one or more relays"""
built_packet_relay = build_cmd_relay(0x11, which_relay)
resp = send_cmd_relay(built_packet_relay)
return resp
def switch_relay_off(which_relay):
built_packet_relay = build_cmd_relay(0x12, which_relay)
resp = send_cmd_relay(built_packet_relay)
return resp
# Save data: current, voltage, and power
def data_file(log_file, log_file_header):
"""Docstring"""
if os.path.exists(log_file) is not True:
with open(log_file, mode='a',newline='') as the_file:
writer = csv.writer(the_file, dialect='excel')
writer.writerow(log_file_header)
return log_file
def data_point(inputs: list):
"""Organizes data for export to excel"""
opv = '1'
timenow = strftime("%#m/%#d/%Y %#H:%M")
volts = inputs[0]
current = inputs[1]
power = inputs[2]
data_point = [opv, timenow, volts, current, power]
return data_point
def write_data_tofile(data_point):
global sample_id
if data_point is not None:
sample_id += 1
sample_id_lst = [sample_id]
log_file = data_file()
with open(log_file, mode='a',newline='') as the_file:
writer = csv.writer(the_file, dialect='excel')
writer.writerow(sample_id_lst + data_point)
# IV curve measurement
def open_circ():
"""Open circuit voltage measurement"""
set_mode(mode_cc) # set operation mode to CC
time.sleep(.250)
set_CC_current(cc_current=0) # set CC mode current to 0 amps
time.sleep(.1)
oc_vals = get_input_values() # read open circuits levels
oc_data_point = data_point(oc_vals) # create data point for open circuit measurement
voc = oc_data_point[2] # open circuit voltage measurement
print('Open circuit voltage: ', voc)
write_data_tofile(oc_data_point) # write data to file
return voc
def iv_curve(voc):
"""Measure intermediate current voltage points"""
set_mode(mode_cv) # set operation mode to CC
time.sleep(.250)
volt_step = voc
while volt_step > 0.5:
set_CV_volts(volt_step)
time.sleep(.1)
curve_vals = get_input_values()
curve_data_point = data_point(curve_vals)
print('voltage, current, power: ', curve_data_point[2], curve_data_point[3], curve_data_point[4])
write_data_tofile(curve_data_point)
new_volt_step = curve_data_point[2] - 1.0
volt_step = new_volt_step
pass
def short_circ():
"""Measure short circuit current (nearest to 0 volts)"""
set_mode(mode_cv)
time.sleep(.250)
set_CV_volts(0.1)
time.sleep(.250)
sc_vals = get_input_values()
sc_data_point = data_point(sc_vals)
jsc = sc_data_point[3]
print('Short circuit current: ', jsc)
write_data_tofile(sc_data_point)
def sweep():
"""Measure entire IV curve"""
set_enable_load(True) # turn input ON
time.sleep(.250)
print('Begin IV curve measurement')
voc = open_circ() # measure open circuit voltage
iv_curve(voc) # measure iv curve
short_circ() # measure short circuit current
time.sleep(.250)
set_enable_load(False) # turn input OFF
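# Example measurement run (sketch; assumes the load on COM4 and the relay card on COM5, as configured above):
#   init_load()
#   init_relay_card()
#   switch_relay_on(r1)   # connect panel 1
#   sweep()               # measure its IV curve
#   switch_relay_off(r1)
#   close_relay()
#   close()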
#______________________________________________________________________________
def process_data(in_file: str, out_file: str, opv_num: str):
    """Process data for each IV curve measurement"""
    out_file_header = ['opv', 'curve_id', 'time', 'hour', 'voc', 'jsc', 'mpp', 'ff']
    data_file(out_file, out_file_header)
    df = pd.read_csv(in_file)
curve_id_count = 1
curve = df.loc[df['curve_id'] == curve_id_count]
while curve is not None:
opv = opv_num
time = curve['time'].iloc[0] # start time of IV curve measurement
hour = float(time[-2] + time[-1])/60.0 + float(time[-5] + time[-4])
voc = curve['volts'].max()
jsc = curve['current'].max()
mpp = curve['power'].max()
ff = mpp / (voc * jsc)
data_point = [opv, curve_id_count, time, hour, voc, jsc, mpp, ff]
with open(out_file, mode='a',newline='') as the_file:
writer = csv.writer(the_file, dialect='excel')
writer.writerow(data_point)
new_curve_id_count = curve_id_count + 1
curve_id_count = new_curve_id_count
curve = df.loc[df['curve_id'] == curve_id_count]
pass
return
def match_env(opv_in_file, env_in_file, out_file):
"""Match corresponding environmental measurement to IV curve measurement"""
df_opv = pd.read_csv(opv_in_file)
df_env = pd.read_csv(env_in_file)
# df_env['TIMESTAMP'] = pd.to_datetime(df_env['TIMESTAMP'],format='%m/%d/%y %H:%M').drop_duplicates() # 10-27-19 to 10-31-19
df_env['TIMESTAMP'] = | pd.to_datetime(df_env['TIMESTAMP'],format='%Y-%m-%d %H:%M:%S') | pandas.to_datetime |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import datetime as dt
import collections
import sklearn.preprocessing
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.animation as animation
import tempfile
from PIL import Image
first_date = dt.date(2020, 3, 1)
## Main
def main():
df = download_data()
countries = get_all_countries(df, min_population=100000)
plot_by_country(df=df, ctype='deaths')
death_rate_chart(df=df, countries=countries, ctype='deaths', num_to_display=30)
## Visualisation
def death_rate_chart(df, countries, ctype, num_to_display=None):
results = pd.DataFrame(index=pd.date_range(start=first_date, end='today'), columns=countries)
for country in countries:
sr = country_series(df, country, ctype, cumsum=True, log=False)
sr /= df[df.countriesAndTerritories == country].iloc[0].popData2018
results[country] = sr
results = results.fillna(0)
sr = results.iloc[-1]
sr = sr.sort_values()
if isinstance(num_to_display, int):
sr = sr[-num_to_display:]
title = '%s per 100,000 for top %d countries' % (ctype.title(), num_to_display)
else:
title = '%s per 100,000' % (ctype.title())
sr *= 100000
l = len(sr)
labels = clean_labels(sr.index)
spacing = [(1/l)*i for i in range(l)]
colours = matplotlib.cm.hsv(sr / float(max(sr)))
fig, ax = plt.subplots()
plt.barh(spacing, width=sr.to_list(), height=(1/l)*0.92, tick_label=labels, color='orange')
plt.yticks(fontsize=8)
plt.title(title)
plt.xlabel(ctype.title())
# plt.show()
plt.savefig('bar_chart.png', bbox_inches='tight', dpi=300)
def plot_by_country(df, ctype):
df = normalised_progression_by_country(df, get_all_countries(df), ctype)
countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name='admin_0_countries')
cmap = matplotlib.cm.get_cmap('Spectral')
saved_figs = []
limit=5
for i in range(df.shape[0]):
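        # render one world-map frame per day to a temporary file, then stitch the frames into an animation below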
tfile = tempfile.TemporaryFile()
ax = plt.axes(projection=ccrs.PlateCarree(), label=str(i))
for country in shpreader.Reader(countries_shp).records():
c = clean_country(country.attributes['NAME_LONG'])
if c == None:
rgba = (0.5, 0.5, 0.5, 1.0)
else:
rgba = cmap(df[c][i])
ax.add_geometries([country.geometry], ccrs.PlateCarree(), facecolor=rgba, label=country.attributes['NAME_LONG'])
plt.title(str(df.index[i]).split(' ')[0])
plt.savefig(tfile, dpi=400, bbox_inches='tight')
saved_figs.append(tfile)
plt.close()
fig = plt.figure()
ims = []
for temp_img in saved_figs:
X = Image.open(temp_img)
ims.append([plt.imshow(X, animated=True)])
ani = animation.ArtistAnimation(fig, ims, interval=800, blit=True, repeat_delay=1000)
plt.axis('off')
plt.tight_layout(pad=0)
# plt.show()
ani.save('animation.gif', writer='imagemagick', fps=2, dpi=400)
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=2, metadata=dict(artist='Me'), bitrate=100000)
# ani.save('/Users/daniel/Desktop/animation.mp4', writer=writer, dpi=400)
## Data acquisition and processing
def clean_labels(labels):
results = []
for label in labels:
if label == 'Cases_on_an_international_conveyance_Japan':
results.append('Japan')
elif label == 'United_States_of_America':
results.append('United States')
else:
results.append(label.replace('_', ' '))
return results
def download_data():
covid_raw_pd = pd.read_csv('https://opendata.ecdc.europa.eu/covid19/casedistribution/csv')
# covid_raw_pd = pd.read_csv('/Users/daniel/Downloads/cv.csv')
cols_to_drop = ['day', 'month', 'year', 'geoId', 'countryterritoryCode', 'continentExp']
covid_raw_pd = covid_raw_pd[covid_raw_pd.columns.drop(cols_to_drop)]
covid_raw_pd['dateRep'] = pd.to_datetime(covid_raw_pd['dateRep'], format=r'%d/%m/%Y')
return covid_raw_pd
def get_all_countries(df, min_population=None):
if isinstance(min_population, int):
df = df[df.popData2018 >= min_population]
return df.loc[:, 'countriesAndTerritories'].drop_duplicates()
def get_eu_countries():
return | pd.Series(['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czechia', 'Denmark', 'Estonia', 'France', 'Germany', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands', 'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden']) | pandas.Series |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
cardData = pd.read_csv('CardData.csv', header=0, encoding='utf-8-sig')
coinData = pd.read_csv('CoinData.csv', header=0, encoding='utf-8-sig')
baseCost = pd.read_csv('BaseCost.csv', header=0, encoding='utf-8-sig')
numCards = len(cardData.index)
numCoins = len(coinData.index)
baseCost1 = baseCost.at[0,'CPU Price'] + baseCost.at[0, 'Mobo Price'] + baseCost.at[0, 'SSD Price'] + baseCost.at[0, 'Case Price'] + baseCost.at[0, 'PSU Price'] + baseCost.at[0, 'RAM Price']
multCard = int(sys.argv[1])
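# multCard: card-count multiplier taken from the command line; it scales per-card power draw and cost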
powerCost = pd.Series()
for i in range (0, numCards):
powerCost2 = pd.Series(((cardData.at[i, 'Power Consumption'] * 24 * baseCost.at[0, 'Power Cost']/1000) * multCard), index =[i])
powerCost = powerCost.append(powerCost2)
costEquip = pd.Series()
for i in range (0, numCards):
costEquip1 = pd.Series(baseCost1 + (cardData.at[i, 'Cost'] * multCard), index=[i])
costEquip = costEquip.append(costEquip1)
costEquipdf = | pd.DataFrame(costEquip, columns=['Initial Investment']) | pandas.DataFrame |
# coding=utf-8
"""
Comparison of the effectiveness of machine-learning methods.
Classification - will a bank client repay a loan?
Data source: http://archive.ics.uci.edu/ml/machine-learning-databases/00350/
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import OneHotEncoder
import ssn
from utils import fibonacci_range, prep_data, prep_test_data_y, prep_test_data_x
VERBOSE = True
def load_dataset(path="bank.csv", verbose=True):
print("Loading data...")
dataset = pd.read_csv(path, dtype=str)
if verbose:
print(dataset.head())
print(dataset.describe())
return dataset
def split_dataset(dataset):
print("Spliting dataset...")
X = dataset.drop(['deposit', 'age', 'balance'], axis=1)
y = dataset['deposit']
z = dataset[['age', 'balance']]
return X, y, z
if __name__ == "__main__":
df = load_dataset(verbose=VERBOSE)
X, y, z = split_dataset(df)
z = z.replace(" ", "0.0", regex=True)
z = z.apply(pd.to_numeric)
X = pd.DataFrame(OneHotEncoder().fit_transform(X).toarray())
y = pd.DataFrame(OneHotEncoder().fit_transform(np.array(y).reshape(-1, 1)).toarray()).loc[:, 1]
X = pd.concat([X, z], axis=1)
out = | pd.concat([X, z, y], axis=1) | pandas.concat |
#!/usr/bin/env python3
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 2 04:25:58 2018
@author: peifeng
"""
from __future__ import print_function, absolute_import, division
import re
from datetime import datetime
from collections import defaultdict
import multiprocessing as mp
from pathlib import Path
import subprocess as sp
import tempfile
import pandas as pd
#import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import plotutils as pu
import compmem as cm
def load_case(path):
df = pd.read_csv(path, header=None, sep=' ',
names=['date', 'time', 'event', 'skip', 'Model'],
parse_dates=[['date', 'time']])
df = df[['date_time', 'event', 'Model']]
df['timestamp'] = df['date_time']
df = df.drop('date_time', axis=1)
wls = df.pivot_table(values='timestamp', index=['Model'],
columns='event', aggfunc='first').reset_index()
for col in ['Started', 'Queued', 'Finished']:
wls[col] = wls[col].str[:-1]
wls[col] = pd.to_datetime(wls[col])
wls['queuing'] = wls.Started - wls.Queued
wls['JCT'] = wls.Finished - wls.Queued
# for convinent
wls['No'] = pd.to_numeric(wls['Model'].str.rpartition('.')[2])
return wls
def load_trace(path, fifo=True):
df = pd.read_csv(path)
df = df.sort_values(by='submit_time')
if fifo:
models = defaultdict(dict)
curr = 0
for idx, row in df.iterrows():
if curr < row['submit_time']:
curr = row['submit_time']
models[idx]['Queued'] = row['submit_time']
models[idx]['Started'] = curr
curr += row['duration']
models[idx]['Finished'] = curr
data = [
{
"Model": '{model_name}.tf.{iterations}iter.{job_id}'.format(**df.iloc[idx]),
"Finished": m['Finished'],
"Queued": m['Queued'],
"Started": m['Started'],
"queuing": m['Started'] - m['Queued'],
"JCT": m['Finished'] - m['Queued']
}
for idx, m in models.items()
]
df = pd.DataFrame(data)
else:
data = [
{
"Model": f"{row.model_name}.tf.{row.iterations}iter.{row.job_id}",
"Finished": row.submit_time + row.duration,
"Queued": row.submit_time,
"Started": row.submit_time,
"queuing": 0,
"JCT": row.duration
}
for idx, row in df.iterrows()
]
df = pd.DataFrame(data)
for col in ['Finished', 'Queued', 'Started', 'queuing', 'JCT']:
df[col] = pd.to_timedelta(df[col], unit='s')
df['No'] = pd.to_numeric(df['Model'].str.rpartition('.')[2])
return df
def load_refine(pathdir):
# load preempt select events
with tempfile.NamedTemporaryFile() as f:
server_output = pathdir/'server.output'
sp.check_call(['grep', 'preempt_select_sess', str(server_output)], stdout=f)
f.flush()
df = cm.load_generic(f.name, event_filters=['preempt_select_sess'])
df = df.drop(['evt', 'level', 'loc', 'thread', 'type'], axis=1)
# convert UTC from server to local
df['timestamp'] = df.timestamp.dt.tz_localize('UTC').dt.tz_convert('US/Eastern').dt.tz_localize(None)
sess2Model = {}
# model name -> sess handle
ptn = re.compile('Created session with handle (?P<sess>.+)$')
for fpath in pathdir.glob('*.*.*.*.output'):
with fpath.open() as f:
for line in f:
m = ptn.search(line)
if m:
sess2Model[m.group('sess')] = fpath.name.rstrip('.output')
# add model name info to it
df['Model'] = df.Sess.map(sess2Model)
# make sure every session is covered
assert df.Model.isnull().sum() == 0
    # for convenience: numeric job id parsed from the trailing part of the model name
df['No'] = pd.to_numeric(df['Model'].str.rpartition('.')[2])
return df
def load_serverevents(pathdir):
# sess handle -> lane id
with tempfile.NamedTemporaryFile() as f:
server_output = pathdir/'server.output'
sp.check_call(['grep', 'lane_assigned', str(server_output)], stdout=f)
f.flush()
df = cm.load_generic(f.name, event_filters=['lane_assigned'])
df = df.drop(['evt', 'level', 'loc', 'thread', 'type'], axis=1)
# sess handles are unique
assert len(df.Sess.unique()) == len(df.Sess)
# make Sess as index so we can lookup
df = df.set_index('Sess')
# add a new column
df['Model'] = None
# model name -> sess handle
ptn = re.compile('Created session with handle (?P<sess>.+)$')
for fpath in pathdir.glob('*.*.*.*.output'):
with fpath.open() as f:
for line in f:
m = ptn.search(line)
if m:
df.loc[m.group('sess'), 'Model'] = fpath.name.rstrip('.output')
# reset index so we can use that later
df = df.reset_index()
return df
def refine_time_events(df, sevts):
"""Return a copy of df"""
assert df.Model.is_unique
assert sevts.Model.is_unique
df = df.set_index('Model').sort_index()
sevts = sevts.set_index('Model').sort_index()
# check sevts contains all needed info
assert sevts.index.equals(df.index)
# Server logs in UTC, convert to local
sevts['Started'] = sevts.timestamp.dt.tz_localize('UTC').dt.tz_convert('US/Eastern').dt.tz_localize(None)
sevts = sevts.drop(['timestamp'], axis=1)
df['Queued'] = df.Started
df = df.drop(['Started'], axis=1)
# set Model as index for both as then and then concat
df = pd.concat([df, sevts], axis=1)
# update queuing
df['queuing'] = df.Started - df.Queued
return df.reset_index()
def plot_timeline(df, colors=None, **kwargs):
ax = kwargs.pop('ax', None)
if ax is None:
ax = plt.gca()
# sort df by no
df['No'] = pd.to_numeric(df['Model'].str.rpartition('.')[2])
df = df.sort_values(by='No')
offset = df.Queued.min()
qmin = (df.Queued - offset) / pd.Timedelta(1, unit='s')
xmin = (df.Started - offset) / pd.Timedelta(1, unit='s')
xmax = (df.Finished - offset) / pd.Timedelta(1, unit='s')
if colors is None:
color_cycle = ax._get_lines.prop_cycler
colors = [next(color_cycle)['color'] for _ in qmin]
for (_, row), q, left, right, color in zip(df.iterrows(), qmin, xmin, xmax, colors):
barheight = 0.8
# queuing time
ax.barh(row.No, left - q, barheight, q, color='#b6b6b6')
# run time
bar = ax.barh(row.No, right - left, barheight, left,
color=color,
label='#{3}: {0}'.format(*row.Model.split('.')))
if 'LaneId' in row:
ax.text(right + 2, row.No, f'Lane {row.LaneId}',
ha='left', va='center', fontsize=3)
# ax.legend()
ax.set_xlabel('Time (s)')
# ax.set_ylabel('Workload')
ax.yaxis.set_ticks([])
return bar, colors
def plot_refine(ax, df, refine_data):
# so that we can access job using no
df = df.set_index('No')
    # for every pair of consecutive preempt events, mask out jobs that are not the left event's switched-to job
offset = df.Queued.min()
refine_data['Ntime'] = (refine_data['timestamp'] - offset) / pd.Timedelta(1, unit='s')
# also convert df.Queued to relative time
df['Started'] = (df.Started - offset) / pd.Timedelta(1, unit='s')
df['Finished'] = (df.Finished - offset) / pd.Timedelta(1, unit='s')
bars = []
# group refine_data by laneId
for laneId, grp in refine_data.groupby('LaneId'):
magic = grp.iterrows()
next(magic)
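        # advance the second iterator by one so zip() yields consecutive (left, right) event pairs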
for (_, left), (_, right) in zip(grp.iterrows(), magic):
for no in df.index.unique():
if no == left.No:
continue
if laneId != df.loc[no].LaneId:
continue
l = max(df.loc[no].Started, left.Ntime)
r = min(df.loc[no].Finished, right.Ntime)
if l >= r:
continue
# make sure left and right within job no's started and finished
# mask from left to right
bars.append(ax.barh(no, r - l, 0.5, l, color='#ffffff', edgecolor='#ffffff'))
return bars
def plot_lanes(refined_df, **kwargs):
lanes = refined_df.groupby(['LaneId', 'LaneSize']).agg({
'Queued': 'first',
'Finished': 'last'
}).rename(columns={'Queued':'Started'}).reset_index()
tables = []
for col in ['Started', 'Finished']:
t = lanes.pivot_table(values='LaneSize', columns='LaneId', index=[col], aggfunc='first')
tables.append(t)
lanes2 = | pd.concat(tables) | pandas.concat |
import argparse
from contextlib import redirect_stdout
import os
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import configs
import plot_utils
# Set matplotlib font size
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def create_fg_plots(fg_results_dir, output_dir):
fg_results_df = pd.DataFrame(None, columns=['model_name', 'name', 'acc'])
for model_spec in configs.model_specs:
model_name = model_spec['name']
results_fp = os.path.join(fg_results_dir, model_name + ".pkl")
if not os.path.exists(results_fp):
print("WARNING: did not find results for model %s at %s" % (model_name, results_fp))
continue
results_df = pd.read_pickle(results_fp)
fg_results_df = | pd.concat([fg_results_df, results_df], axis=0, ignore_index=True) | pandas.concat |
import json
import os
import warnings
import casadi as ca
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from scipy.signal import chirp
from skmid.integrator import RungeKutta4
from skmid.models import DynamicModel
from skmid.models import generate_model_attributes
@pytest.fixture
def load_non_linear_model_data():
"""Generate input signal"""
CWD = os.getcwd()
DATA_DIR = "data"
SUB_DATA_DIR = "non_linear_model"
U = pd.read_csv(
filepath_or_buffer=os.path.join(CWD, DATA_DIR, SUB_DATA_DIR, "u_data.csv"),
index_col=0,
)
Y = pd.read_csv(
filepath_or_buffer=os.path.join(CWD, DATA_DIR, SUB_DATA_DIR, "y_data.csv"),
index_col=0,
)
# reading the data from the file
with open(
os.path.join(CWD, DATA_DIR, SUB_DATA_DIR, "settings.json"), mode="r"
) as j_object:
settings = json.load(j_object)
return (U, Y, settings)
# test f(x,u) with multiple input, multiple size, list
@pytest.fixture
def generate_input_signal():
"""Generate input signal"""
N = 2000 # Number of samples
fs = 500 # Sampling frequency [hz]
t = np.linspace(0, (N - 1) * (1 / fs), N)
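    # two test channels: a 1-10 Hz logarithmic chirp and uniform random noise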
df_input = pd.DataFrame(
data={
"chirp": 2 * chirp(t, f0=1, f1=10, t1=5, method="logarithmic"),
"noise": 2 * np.random.random(N),
},
index=t,
)
return (df_input, fs)
@pytest.fixture
def generate_step_signal():
"""Generate Step input signal"""
N = 1000 # Number of samples
fs = 25 # Sampling frequency [hz]
t = np.linspace(0, (N - 1) * (1 / fs), N)
df_input = pd.DataFrame(index=t).assign(
step=lambda x: np.where(x.index < t[int(N / 4)], 0, 1)
)
return (df_input, fs)
@pytest.fixture
def generate_inpulse_signal():
"""Generate Impulse input signal"""
N = 500 # Number of samples
fs = 50 # Sampling frequency [hz]
t = np.linspace(0, (N - 1) * (1 / fs), N)
df_input = pd.DataFrame(index=t).assign(inpulse=np.zeros(N))
df_input.iloc[0] = 1
return (df_input, fs)
class TestRungeKutta4:
"""Test class for function generate_model_parameters."""
def test_model_with_states(self):
"""Test simulation with model dx=f(x)."""
(x, _, _) = generate_model_attributes(
state_size=1, input_size=0, parameter_size=0
)
# initialize first-order model
tau = 1
sys = DynamicModel(state=x, model_dynamics=[-(1 / tau) * x])
n_steps = 50
x0 = [1]
rk4 = RungeKutta4(model=sys)
_ = rk4.simulate(initial_condition=x0, n_steps=n_steps)
df_X = rk4.state_sim_
df_Y = rk4.output_sim_
# check equality of dataframe
| pdt.assert_frame_equal(df_X, df_Y) | pandas.testing.assert_frame_equal |
"""
kbible.py - base bible object and commands
"""
import pandas as pd
import yaml
import os
import subprocess
__author__ = "<NAME> <<EMAIL>>"
__docformat__ = "restructuredtext en"
class KBible(object):
""" Bible text object """
def __init__(self, version="개역한글판성경", debug=False, **kwargs):
""" read or parse bible text """
self._biblelist = []
self._versionlist = {}
this_dir, this_filename = os.path.split(__file__)
listname = os.path.join(this_dir, "data", u"book_names.csv")
self._table = pd.read_csv(listname, index_col=0)
self.add(version, **kwargs)
def add(self, version, **kwargs):
""" add different version """
b = read_full_bible(version_name=version, **kwargs)
self._biblelist.append(b)
self._versionlist[version] = len(self._biblelist) - 1
def delete(self, version):
""" remove version """
if (version in self._versionlist) and (len(self._versionlist) > 1):
i = self._versionlist[version]
del self._versionlist[version]
del self._biblelist[i]
else:
print('... not found or only have one bible version: {}'.format(version))
def save(self, version="개역한글판성경"):
""" save bible text as compressed csv """
if version in self._versionlist:
this_dir, this_filename = os.path.split(__file__)
filename = os.path.join(this_dir, "data", version + ".csv.gz")
b = self._biblelist[self._versionlist[version]]
b.to_csv(filename, compression='gzip')
print('... save file: {}'.format(filename))
def get(self, version=""):
""" return bible as pandas """
if version == "":
return self._biblelist[0]
try:
return self._biblelist[self._versionlist[version]]
        except (KeyError, IndexError):
print('... no bible version: {}'.format(version))
return []
def bystr(self, sstr, form="md"):
""" extract bible verse """
if form == "pd":
res = pd.DataFrame()
for b in self._biblelist:
res = pd.concat([res, extract_bystr(b, sstr, form="pd")], axis=0)
return res
else:
msg = ""
for b in self._biblelist:
msg = msg + extract_bystr(b, sstr, form=form) + '\n'
return msg
def search(self, sstr, form="md", regex=False):
""" search string in bible """
res = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import fasttext
import pandas as pd
import math
import re
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# Folder Location
DIR = os.path.dirname(os.path.realpath(__file__))
# Read Raw Corpus data
dat = pd.read_csv(DIR + '/kosacCorpus.csv')
# column labels of data frame
# Rename Columns to snake case
dat.columns = [
re.sub('^_|_$', '', re.sub('\\W+', '_', column))
for column in dat.columns
]
# Select sentiment columns and raw sentence
dat = dat[[
'polarity', 'intensity', 'subjectivity_type', 'subjectivity_polarity', 'raw_sentence', 'confident'
]]
# Filter for High Confidence
dat = dat[dat['confident'] == True]
# Exclude rows whose sentiment values are NaN or the string 'None'
dat = dat[
(
pd.isnull(dat.polarity) == False
| dat.polarity.str.contains('None')
)
& (
| pd.isnull(dat.intensity) | pandas.isnull |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
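# e.g. self.frame becomes a dict mapping each typ to its fixture, so
# self.frame['ints'] is self.frame_ints; missing combinations map to None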
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indices = itertools.product(*axes)
for i in indices:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
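# harness: apply method1/key1 to every selected fixture and compare the
# result against method2/key2; typs/objs/axes restrict which fixtures from
# setUp are exercised, and `fails` marks cases expected to fail (usually an
# exception type that is then accepted as a passing outcome).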
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, then ok; otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indices = _generate_indices(f, values)
for i in indices:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indices = _generate_indices(f, values)
for i in indices:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE'[x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when using
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possible (mask index, accessor) combinations
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
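# keys are (index used to build the boolean mask, accessor); values are the
# expected bit string of the masked 'nums' sum, or the expected error message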
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because the index is non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic indexes are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
        # dtype should properly raise KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
        # these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
orig = tm.makeTimeDataFrame()
df = orig.copy()
        # don't allow non-string inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.loc[100, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100, :] = df.ix[0]
self.assertRaises(ValueError, f)
# allow object conversion here
df = orig.copy()
df.loc['a', :] = df.ix[0]
exp = orig.append(pd.Series(df.ix[0], name='a'))
tm.assert_frame_equal(df, exp)
tm.assert_index_equal(df.index,
pd.Index(orig.index.tolist() + ['a']))
self.assertEqual(df.index.dtype, 'object')
def test_partial_set_empty_series(self):
# GH5226
# partially set with an empty object series
s = Series()
s.loc[1] = 1
tm.assert_series_equal(s, Series([1], index=[1]))
s.loc[3] = 3
tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
s = Series()
s.loc[1] = 1.
tm.assert_series_equal(s, Series([1.], index=[1]))
s.loc[3] = 3.
tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
s = Series()
s.loc['foo'] = 1
tm.assert_series_equal(s, Series([1], index=['foo']))
s.loc['bar'] = 3
tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
self.assertRaises(ValueError, f)
def f():
df.loc[1] = Series([1], index=['foo'])
self.assertRaises(ValueError, f)
def f():
df.loc[:, 1] = 1
self.assertRaises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index(
[], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
tm.assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = DataFrame()
df['foo'] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(range(len(df)))
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
df['foo'] = range(len(df))
return df
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
tm.assert_frame_equal(f(), expected)
df = DataFrame()
tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
df2 = DataFrame()
df2[1] = Series([1], index=['foo'])
df.loc[:, 1] = Series([1], index=['foo'])
tm.assert_frame_equal(df, DataFrame([[1]], index=['foo'], columns=[1]))
tm.assert_frame_equal(df, df2)
# no index to start
expected = DataFrame({0: Series(1, index=range(4))},
columns=['A', 'B', 0])
df = DataFrame(columns=['A', 'B'])
df[0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['A', 'B'])
df.loc[:, 0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_row(self):
# GH5720, GH5744
# don't create rows when empty
expected = DataFrame(columns=['A', 'B', 'New'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['New'] = expected['New'].astype('float64')
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
y['New'] = np.nan
tm.assert_frame_equal(y, expected)
# tm.assert_frame_equal(y,expected)
expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
expected['d'] = expected['d'].astype('int64')
df = DataFrame(columns=['a', 'b', 'c c'])
df['d'] = 3
tm.assert_frame_equal(df, expected)
tm.assert_series_equal(df['c c'], Series(name='c c', dtype=object))
# reindex columns is ok
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
result = y.reindex(columns=['A', 'B', 'C'])
expected = DataFrame(columns=['A', 'B', 'C'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['C'] = expected['C'].astype('float64')
tm.assert_frame_equal(result, expected)
def test_partial_set_empty_frame_set_series(self):
# GH 5756
# setting with empty Series
df = DataFrame(Series())
tm.assert_frame_equal(df, DataFrame({0: Series()}))
df = DataFrame(Series(name='foo'))
tm.assert_frame_equal(df, DataFrame({'foo': Series()}))
def test_partial_set_empty_frame_empty_copy_assignment(self):
# GH 5932
# copy on empty with assignment fails
df = DataFrame(index=[0])
df = df.copy()
df['a'] = 0
expected = DataFrame(0, index=[0], columns=['a'])
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_empty_consistencies(self):
# GH 6171
# consistency on empty frames
df = DataFrame(columns=['x', 'y'])
df['x'] = [1, 2]
expected = DataFrame(dict(x=[1, 2], y=[np.nan, np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
df = DataFrame(columns=['x', 'y'])
df['x'] = ['1', '2']
expected = DataFrame(
dict(x=['1', '2'], y=[np.nan, np.nan]), dtype=object)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df.loc[0, 'x'] = 1
expected = DataFrame(dict(x=[1], y=[np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
df = tm.makeDataFrame()
df['A'] # cache series
df.ix["Hello Friend"] = df.ix[0]
self.assertIn("Hello Friend", df['A'].index)
self.assertIn("Hello Friend", df['B'].index)
panel = tm.makePanel()
panel.ix[0] # get first item into cache
panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
self.assertIn("A+1", panel.ix[0].columns)
self.assertIn("A+1", panel.ix[1].columns)
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=['x', 'y', 'z'])
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
# setting via chained assignment
# but actually works, since everything is a view
df.loc[0]['z'].iloc[0] = 1.
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 1)
# correct setting
df.loc[(0, 0), 'z'] = 2
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 2)
# 10264
df = DataFrame(np.zeros((5, 5), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e'], index=range(5))
df['f'] = 0
df.f.values[3] = 1
# TODO(wesm): unused?
# y = df.iloc[np.arange(2, len(df))]
df.f.values[3] = 2
expected = DataFrame(np.zeros((5, 6), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e', 'f'], index=range(5))
expected.at[3, 'f'] = 2
tm.assert_frame_equal(df, expected)
expected = Series([0, 0, 0, 2, 0], name='f')
tm.assert_series_equal(df.f, expected)
def test_set_ix_out_of_bounds_axis_0(self):
df = pd.DataFrame(
randn(2, 5), index=["row%s" % i for i in range(2)],
columns=["col%s" % i for i in range(5)])
self.assertRaises(ValueError, df.ix.__setitem__, (2, 0), 100)
def test_set_ix_out_of_bounds_axis_1(self):
df = pd.DataFrame(
randn(5, 2), index=["row%s" % i for i in range(5)],
columns=["col%s" % i for i in range(2)])
self.assertRaises(ValueError, df.ix.__setitem__, (0, 2), 100)
def test_iloc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_loc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.loc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_ix_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_index_type_coercion(self):
# GH 11836
# if we have an index type and set it with something that looks
        # to numpy like the same, but actually is not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)),
Series(range(5), index=range(1, 6))]:
self.assertTrue(s.index.is_integer())
for indexer in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
indexer(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(indexer(s2)[0.1] == 0)
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
for s in [Series(range(5), index=np.arange(5.))]:
self.assertTrue(s.index.is_floating())
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
idxr(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(idxr(s2)[0.1] == 0)
s2 = s.copy()
idxr(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
idxr(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df['a'] = 10
tm.assert_frame_equal(DataFrame({0.0: df[0.0],
1.0: df[1.0],
'a': [10] * 10}),
df)
def test_duplicate_ix_returns_series(self):
df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
columns=list('abc'))
r = df.ix[0.2, 'a']
e = df.loc[0.2, 'a']
tm.assert_series_equal(r, e)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df.loc[df.index[:2]] = 1
expected = DataFrame({'a': [1, 1, 3], 'b': [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
df = | DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.]) | pandas.core.api.DataFrame |
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import pandas as pd
import numpy as np
import sys, os, site, zipfile, math, time, json, io
import googlemaps, urllib, shapely, shutil, requests
import xml.etree.ElementTree as ET
from glob import glob
from urllib.error import HTTPError
from urllib.request import URLError
from http.client import IncompleteRead
from zipfile import BadZipFile
from tqdm import tqdm, trange
from warnings import warn
###########################
### IMPORT PROJECT PATH ###
import pvvm.settings
revmpath = pvvm.settings.revmpath
datapath = pvvm.settings.datapath
apikeys = pvvm.settings.apikeys
nsrdbparams = pvvm.settings.nsrdbparams
#####################
### Imports from pvvm
import pvvm.toolbox
import pvvm.io
#######################
### DICTS AND LISTS ###
#######################
isos = ['CAISO', 'ERCOT', 'MISO', 'PJM', 'NYISO', 'ISONE']
resolutionlmps = {
('CAISO', 'da'): 60, ('CAISO', 'rt'): 5,
('ERCOT', 'da'): 60, ('ERCOT', 'rt'): 5,
('MISO', 'da'): 60, ('MISO', 'rt'): 60,
('PJM', 'da'): 60, ('PJM', 'rt'): 60,
('NYISO', 'da'): 60, ('NYISO', 'rt'): 5,
('ISONE', 'da'): 60, ('ISONE', 'rt'): 60,
}
################
### DOWNLOAD ###
################
###############
### General use
def constructpayload(**kwargs):
out = []
for kwarg in kwargs:
out.append('{}={}'.format(kwarg, kwargs[kwarg]))
stringout = '&'.join(out)
return stringout
def constructquery(urlstart, **kwargs):
out = '{}{}'.format(urlstart, constructpayload(**kwargs))
return out
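### Usage sketch (illustrative only; the URL and parameters below are
### assumptions, not values used elsewhere in this module):
def _example_constructquery():
    # Keyword arguments are joined as '&'-separated key=value pairs after
    # urlstart, giving (with Python 3.7+ keyword ordering):
    # 'http://oasis.caiso.com/oasisapi/GroupZip?groupid=DAM_LMP_GRP&version=1&resultformat=6'
    return constructquery(
        'http://oasis.caiso.com/oasisapi/GroupZip?',
        groupid='DAM_LMP_GRP', version=1, resultformat=6)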
def stampify(date, interval=pd.Timedelta('1H')):
datetime = pd.Timestamp(date)
if interval == pd.Timedelta('1H'):
dateout = '{}{:02}{:02}T{:02}'.format(
datetime.year, datetime.month,
datetime.day, datetime.hour)
elif interval == pd.Timedelta('1D'):
dateout = '{}{:02}{:02}'.format(
datetime.year, datetime.month,
datetime.day)
return dateout
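### Example sketch (dates are arbitrary): stampify truncates a timestamp to the
### file-naming resolution used by the download functions below.
def _example_stampify():
    hourly = stampify('2017-06-01 14:00', interval=pd.Timedelta('1H'))  # '20170601T14'
    daily = stampify('2017-06-01 14:00', interval=pd.Timedelta('1D'))   # '20170601'
    return hourly, daily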
def download_file_series(urlstart, urlend, fileseries, filepath,
overwrite=False, sleeptime=60, numattempts=200, seriesname=True):
"""
Example
-------
You want to download a list of files at urls = [
'http://www.test.com/foo001.csv', 'http://www.test.com/foo002.csv'].
Then:
urlstart = 'http://www.test.com/foo'
urlend = '.csv'
fileseries = ['001', '002']
If you want the files to be named 'foo001.csv', use seriesname=False
If you want the files to be named '001.csv', use seriesname=True
"""
filepath = pvvm.toolbox.pathify(filepath, make=True)
### Make lists of urls, files to download, and filenames
urls = [(urlstart + file + urlend) for file in fileseries]
todownload = [os.path.basename(url) for url in urls]
if seriesname == True:
filenames = [os.path.basename(file) + urlend for file in fileseries]
else:
filenames = todownload
### Get the list of downloaded files
downloaded = [os.path.basename(file) for file in glob(filepath + '*')]
### Remake the list if overwrite == False
if overwrite == False:
filestodownload = []
urlstodownload = []
fileseriesnames = []
for i in range(len(filenames)):
if filenames[i] not in downloaded:
filestodownload.append(todownload[i])
urlstodownload.append(urls[i])
fileseriesnames.append(filenames[i])
elif overwrite == True:
filestodownload = todownload
urlstodownload = urls
fileseriesnames = filenames
### Download the files
for i in trange(len(urlstodownload)):
### Attempt the download
attempts = 0
while attempts < numattempts:
try:
urllib.request.urlretrieve(
urlstodownload[i], filepath + fileseriesnames[i])
break
except (HTTPError, IncompleteRead, EOFError) as err:
print(urlstodownload[i])
print(filestodownload[i])
                print('Rebuffed on attempt # {} at {} by "{}". '
'Will retry in {} seconds.'.format(
attempts, pvvm.toolbox.nowtime(), err, sleeptime))
attempts += 1
time.sleep(sleeptime)
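### Usage sketch mirroring the docstring example above (the URLs are
### hypothetical; nothing here is executed at import time):
def _example_download_file_series():
    # Fetches http://www.test.com/foo001.csv and .../foo002.csv, saving them as
    # 001.csv and 002.csv (seriesname=True names files by their series entry).
    download_file_series(
        urlstart='http://www.test.com/foo', urlend='.csv',
        fileseries=['001', '002'], filepath='test-downloads/',
        overwrite=False, seriesname=True)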
###########################
### Geographic manipulation
def rowlatlon2x(row):
latrad = row['latitude'] * math.pi / 180
lonrad = row['longitude'] * math.pi / 180
x = math.cos(latrad) * math.cos(lonrad)
return x
def rowlatlon2y(row):
latrad = row['latitude'] * math.pi / 180
lonrad = row['longitude'] * math.pi / 180
y = math.cos(latrad) * math.sin(lonrad)
return y
def rowlatlon2z(row):
latrad = row['latitude'] * math.pi / 180
z = math.sin(latrad)
return z
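### Example sketch (coordinates are arbitrary): these row-wise helpers are
### written to be applied to a dataframe with 'latitude' and 'longitude'
### columns, yielding unit-sphere Cartesian coordinates (useful, for example,
### when averaging node locations).
def _example_latlon_to_xyz():
    nodes = pd.DataFrame({'latitude': [40.0, 41.0], 'longitude': [-105.0, -104.0]})
    nodes['x'] = nodes.apply(rowlatlon2x, axis=1)
    nodes['y'] = nodes.apply(rowlatlon2y, axis=1)
    nodes['z'] = nodes.apply(rowlatlon2z, axis=1)
    return nodes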
############
### ISO LMPs
"""
Note: These scripts worked as of early 2018, but MISO, PJM, and NYISO have since
changed their websites, and CAISO has removed data prior to 20150303. Scripts
are included here for documentary purposes and as a resource for future
data collection, but are unlikely to work given ISO website changes.
"""
def download_caiso_lmp_allnodes(market, start, filepathout,
product='LMP', numattempts=200, waittime=10):
urlstart = 'http://oasis.caiso.com/oasisapi/GroupZip?'
columnsout = [
'INTERVALSTARTTIME_GMT', 'NODE', 'MW',
'OPR_DT', 'OPR_HR', 'OPR_INTERVAL']
if market in ['RTM', 'HASP', 'RTPD']:
interval = pd.Timedelta('1H')
elif market in ['DAM', 'RUC']:
interval = pd.Timedelta('1D')
starttimestamp = pd.Timestamp(start)
endtimestamp = starttimestamp + interval
startdatetime = '{}{:02}{:02}T{:02}:00-0000'.format(
starttimestamp.year, starttimestamp.month,
starttimestamp.day, starttimestamp.hour)
enddatetime = '{}{:02}{:02}T{:02}:00-0000'.format(
endtimestamp.year, endtimestamp.month,
endtimestamp.day, endtimestamp.hour)
if interval == pd.Timedelta('1D'):
fileout = '{}{:02}{:02}.gz'.format(
starttimestamp.year, starttimestamp.month,
starttimestamp.day)
elif interval == pd.Timedelta('1H'):
fileout = '{}{:02}{:02}T{:02}.gz'.format(
starttimestamp.year, starttimestamp.month,
starttimestamp.day, starttimestamp.hour)
url = constructquery(
urlstart,
groupid='{}_LMP_GRP'.format(market),
startdatetime=startdatetime,
enddatetime=enddatetime,
version=1,
resultformat=6)
attempts = 0
while attempts < numattempts:
try:
# if product.lower() in ['mcc', 'mce', 'mcl']:
# if (market.upper() in ['DAM', 'RUC']) and (starttimestamp.year >= 2016):
# if market.upper() in ['DAM', 'RUC']:
if ((product.lower() in ['mcc', 'mce', 'mcl'])
or ((market == 'DAM') and product.lower() == 'lmp')):
zip_file = zipfile.ZipFile(io.BytesIO(
urllib.request.urlopen(url).read()))
for csv_file in zip_file.infolist():
if csv_file.filename.endswith(
'{}_v1.csv'.format(product.upper())):
df = pd.read_csv(zip_file.open(csv_file.filename))
else:
df = pd.read_csv(url, compression='zip')
dfout = df[df['LMP_TYPE'] == product.upper()][columnsout]
dfout.to_csv(
'{}{}'.format(filepathout, fileout),
columns=columnsout,
index=False,
compression='gzip')
return dfout
except (
URLError, IncompleteRead, pd.errors.ParserError,
BadZipFile, KeyError, HTTPError, UnboundLocalError) as error:
print(
'Error for {} on attempt {}/{}: {}'.format(
start, attempts, numattempts, error),
# end='\r',
)
attempts += 1
time.sleep(waittime)
if attempts >= numattempts:
raise URLError('{}{}'.format(filepathout, fileout))
def download_lmps(year, iso, market, overwrite=False, sleeptime=60,
product='LMP', submarket=None, numattempts=200, subset=None,
waittime=10, filepath=None):
"""
Inputs
------
subset: None or slice()
Notes
-----
* ERCOT LMPs more than 30 days old must be requested from ERCOT.
Requests can be filed at http://www.ercot.com/about/contact/inforequest.
Files should be placed in the folder
revmpath + 'ERCOT/in/lmp/{}/{}/'.format(market, year)
where year is the year of the timestamp within the files.
Note that the date in the filename for day-ahead LMPs is the date before
the timestamps within the file: for example, file
('cdr.00012328.0000000000000000.20151231.125905514.DAMHRLMPNP4183_csv')
contains timestamps for 20160101, and should be placed in the 2016 folder.
"""
### Normalize inputs
iso = iso.upper()
market = market.lower()
year = int(year)
assert market in ['da', 'rt']
assert iso in ['CAISO', 'MISO', 'PJM', 'NYISO', 'ISONE']
### Set file structure
if filepath is None:
filepath = revmpath+'{}/in/lmp/{}/'.format(iso, market)
if not os.path.exists(filepath): os.makedirs(filepath)
### Adjust inputs for different isos
urlstart = {
'ISONE': {
'da': 'https://www.iso-ne.com/static-transform/csv/histRpts/da-lmp/WW_DALMP_ISO_',
'rt': 'https://www.iso-ne.com/static-transform/csv/histRpts/rt-lmp/lmp_rt_final_'},
'MISO': {
# 'da': 'https://old.misoenergy.org/Library/Repository/Market%20Reports/',
# 'rt': 'https://old.misoenergy.org/Library/Repository/Market%20Reports/',
'da': 'https://docs.misoenergy.org/marketreports/',
'rt': 'https://docs.misoenergy.org/marketreports/',
},
'PJM': {
'da': 'http://www.pjm.com/pub/account/lmpda/',
'rt': 'http://www.pjm.com/pub/account/lmp/'},
'NYISO': {
'da': 'http://mis.nyiso.com/public/csv/damlbmp/',
'rt': 'http://mis.nyiso.com/public/csv/realtime/'},
}
urlend = {
'ISONE': {'da': '.csv', 'rt': '.csv'},
'MISO': {'da': '_da_lmp.csv', 'rt': '_rt_lmp_final.csv'},
'PJM': {'da': '-da.zip', 'rt': '.zip'},
'NYISO': {'da': 'damlbmp_gen_csv.zip', 'rt': 'realtime_gen_csv.zip'},
}
files = {
'ISONE': pvvm.toolbox.makedays(year),
'MISO': pvvm.toolbox.makedays(year),
'PJM': pvvm.toolbox.makedays(year),
'NYISO': ['{}{:02}01'.format(year, month) for month in range(1,13)]
}
### Download files
if iso == 'ISONE':
download_file_series(
urlstart=urlstart[iso][market], urlend=urlend[iso][market],
fileseries=files[iso], filepath=filepath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
elif iso == 'MISO':
urls = [(urlstart[iso][market] + file + '_da_expost_lmp.csv')
if (int(file) >= 20150301) and (market == 'da')
else (urlstart[iso][market] + file + urlend[iso][market])
for file in files[iso]]
download_file_series(
urlstart='', urlend='', fileseries=urls, filepath=filepath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
elif iso == 'PJM':
da_updated = {
'20151201': '-da_updated.zip',
'20150930': '-da_updated.zip',
'20140617': '-da_updated.zip',
'20150616': '-da_updated.zip',
'20150615': '-da_updated.zip',
'20150614': '-da_updated.zip',
'20140613': '-da_updated.zip',
'20150603': '-da_updated.zip',
'20150602': '-da_updated.zip',
'20150601': '-da_updated.zip',
'20150409': '-da_updated.zip',
'20140327': '-da_updated.zip',
'20111012': '-da_update.zip',
'20111011': '-da_update.zip',
}
rt_updated = {
'20170116': '_updated.zip',
'20170115': '_updated.zip',
'20170114': '_updated.zip',
'20170113': '_updated.zip',
'20160923': '_updated.zip',
'20160417': '_updated.zip',
'20160416': '_updated.zip',
'20160415': '_updated.zip',
'20151110': '_updated.zip',
'20150929': '_updated.zip',
'20150901': '_updated.zip',
'20150831': '_updated.zip',
'20150601': '_updated.zip',
'20150504': '_updated.zip',
'20150427': '_updated.zip',
'20150407': '_updated.zip',
'20150310': '_updated.zip',
'20150309': '_updated.zip',
'20150201': '_updated.zip',
'20150131': '_updated.zip',
'20150130': '_updated.zip',
'20141112': '_updated.zip',
'20141023': '_updated.zip',
'20141013': '_updated.zip',
'20140805': '_updated.zip',
'20140710': '_updated.zip',
'20140507': '_updated.zip',
'20140128': '_updated.zip',
'20131125': '_updated.zip',
'20131120': '_updated.zip',
'20130424': '_updated.zip',
'20130307': '_updated.zip',
'20121109': '_updated.zip',
'20121023': '_updated.zip',
'20121004': '_updated.zip',
'20121003': '_updated2.zip',
'20121001': '_updated.zip',
'20110914': '_updated.zip',
'20110829': '_updated.zip',
'20110617': '_updated.zip',
'20110306': '_updated.zip',
'20110305': '_updated.zip',
'20110304': '_updated.zip',
'20101005': '_updated.zip',
'20100526': '_updated.zip',
'20100201': '_updated.zip',
'20100129': '_updated.zip',
'20100125': '_updated.zip',
'20080904': '_updated.zip',
'20080413': '_updated.zip',
'20080305': '_updated.zip',
'20080215': '_updated.zip',
'20080214': '_updated.zip',
'20071002': '_updated.zip',
'20070822': '_updated.zip',
}
if market == 'da':
# print("Download 'updated' files from http://www.pjm.com/markets-and-operations/"
# "energy/day-ahead/lmpda.aspx and replace the files of the corresponding date"
# "downloaded here")
# ### Files switch from .zip to .csv on 20171109 for day-ahead
# urls = [(urlstart[iso][market] + file + '-da.csv')
# if int(file) >= 20171109
# else (urlstart[iso][market] + file + '-da.zip')
# for file in files[iso]]
# ^ Out of date; files have been reposted as zips (20180621)
urls = [(urlstart[iso][market] + file + da_updated[file])
if file in da_updated.keys()
else (urlstart[iso][market] + file + '-da.zip')
for file in files[iso]]
elif market == 'rt':
# print("Download 'updated' files from http://www.pjm.com/markets-and-operations/"
# "energy/real-time/lmpda.aspx and replace the files of the corresponding date"
# "downloaded here")
# ### Files switch from .zip to .csv on 20171212 for real-time
# urls = [(urlstart[iso][market] + file + '.csv')
# if int(file) >= 20171212
# else (urlstart[iso][market] + file + '.zip')
# for file in files[iso]]
# ^ Out of date; files have been reposted as zips (20180621)
urls = [(urlstart[iso][market] + file + rt_updated[file])
if file in rt_updated.keys()
else (urlstart[iso][market] + file + '.zip')
for file in files[iso]]
download_file_series(
urlstart='', urlend='', fileseries=urls, filepath=filepath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
elif iso == 'NYISO':
### NYISO files are zipped by month; put them in a separate folder
zippath = '{}/in/lmp/{}-zip/'.format(iso, market)
if not os.path.exists(zippath): os.makedirs(zippath)
download_file_series(
urlstart=urlstart[iso][market], urlend=urlend[iso][market],
fileseries=files[iso], filepath=zippath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
### Unzip files
zips = [(zippath + file + urlend[iso][market]) for file in files[iso]]
for i in trange(len(zips)):
zip_ref = zipfile.ZipFile(zips[i], 'r')
zip_ref.extractall(filepath)
zip_ref.close()
elif iso == 'CAISO':
if (submarket == None) and (market == 'rt'): submarket = 'RTM'
elif (submarket == None) and (market == 'da'): submarket = 'DAM'
if submarket in ['RTM', 'HASP', 'RTPD']:
interval = pd.Timedelta('1H')
elif submarket in ['DAM', 'RUC']:
interval = pd.Timedelta('1D')
### Set output filepath
filepath = '{}/in/{}/{}/'.format(iso, product.lower(), market)
if (((market == 'da') and (submarket != 'DAM'))
or ((market == 'rt') and (submarket != 'RTM'))):
filepath = '{}/in/{}/{}/{}/'.format(
iso, product.lower(), market, submarket)
if not os.path.exists(filepath): os.makedirs(filepath)
queries = pd.date_range(
start=pd.Timestamp('{}-01-01T00:00'.format(year)),
end=(pd.Timestamp('{}-01-01T00:00'.format(year+1)) - interval),
freq=interval)
### Initialize error container and subset if necessary
errors = []
if subset == None: subset = slice(None)
# already_downloaded = glob('{}{}*'.format(filepath, year))
for query in tqdm(queries[subset]):
# if '{}{}.gz'.format(filepath, stampify(query)) not in already_downloaded:
if interval == pd.Timedelta('1D'):
fileout = stampify(query)[:-3]
elif interval == pd.Timedelta('1H'):
fileout = stampify(query)
if not os.path.exists('{}{}.gz'.format(filepath, fileout)):
# if overwrite == False:
# if os.path.exists('{}{}.gz'.format(filepath, stampify(query))):
# break
try:
download_caiso_lmp_allnodes(
market=submarket, start=str(query), filepathout=filepath,
product=product, numattempts=numattempts, waittime=waittime)
except (URLError, IncompleteRead, pd.errors.ParserError,
BadZipFile, HTTPError) as error:
errors.append(error)
print(error)
if len(errors) > 0:
pd.Series(errors).to_csv(
'{}__Errors__{}.csv'.format(filepath, time.strftime('%Y%m%dT%H%M%S')),
index=False)
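### Usage sketch (year/ISO/market are illustrative; running it needs network
### access and the revmpath directory layout from pvvm.settings):
def _example_download_lmps():
    # Downloads 2017 day-ahead LMP archives for NYISO and unzips the daily csv
    # files under revmpath + 'NYISO/in/lmp/da/'.
    download_lmps(year=2017, iso='NYISO', market='da', overwrite=False)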
################
### NODALIZE ###
def nodalize(year, market, iso,
filepathin=None, filepathout=None, nodesfile=None,
product='LMP', submarket=None, fillmissinghour=True):
"""
"""
### Set defaults if necessary
if iso.upper() == 'CAISO':
if filepathin == None:
filepathin = revmpath+'{}/in/{}/{}'.format(
iso, product.lower(), market)
if (((market == 'da') and (submarket != 'DAM'))
or ((market == 'rt') and (submarket != 'RTM'))):
filepathin = revmpath+'{}/in/{}/{}/{}/'.format(
iso, product.lower(), market, submarket)
if filepathout == None:
filepathout = revmpath+'{}/io/{}-nodal/{}/'.format(
iso, product.lower(), market)
if (market == 'rt') and (submarket == 'RTM'):
filepathout = revmpath+'{}/io/{}-nodal/{}-month/'.format(
iso, product.lower(), market)
if (((market == 'da') and (submarket != 'DAM'))
or ((market == 'rt') and (submarket != 'RTM'))):
filepathout = revmpath+'{}/io/{}-nodal/{}/{}/'.format(
iso, product.lower(), market, submarket)
if (submarket == None) and (market == 'rt'): submarket = 'RTM'
elif (submarket == None) and (market == 'da'): submarket = 'DAM'
elif iso.upper() == 'ERCOT':
if (filepathin == None) and (market == 'da'):
filepathin = revmpath+'{}/in/lmp/{}/{}/'.format(iso, market, year)
elif (filepathout == None) and (market == 'rt'):
filepathout = revmpath+'{}/io/lmp-nodal/{}-month/'.format(iso, market)
elif filepathout == None:
filepathout = revmpath+'{}/io/lmp-nodal/{}/'.format(iso, market)
else:
if filepathin == None:
filepathin = revmpath+'{}/in/lmp/{}/'.format(iso, market)
if filepathout == None:
filepathout = revmpath+'{}/io/lmp-nodal/{}/'.format(iso, market)
### Make output folders if necessary
if not os.path.exists(filepathout):
os.makedirs(filepathout, exist_ok=True)
if not os.path.exists(revmpath+'{}/io/missingnodes/'.format(iso.upper())):
os.makedirs(revmpath+'{}/io/missingnodes/'.format(iso.upper()), exist_ok=True)
if not os.path.exists(revmpath+'{}/io/datatimes/'.format(iso.upper())):
os.makedirs(revmpath+'{}/io/datatimes/'.format(iso.upper()), exist_ok=True)
if not os.path.exists(revmpath+'{}/io/fulltimenodes/year/'.format(iso.upper())):
        os.makedirs(revmpath+'{}/io/fulltimenodes/year/'.format(iso.upper()), exist_ok=True)
if not os.path.exists(revmpath+'{}/io/fulltimenodes/day/{}/'.format(iso.upper(), market)):
os.makedirs(revmpath+'{}/io/fulltimenodes/day/{}/'.format(iso.upper(), market),
exist_ok=True)
print(filepathout)
### Shared components
nodesfiles = {
'CAISO': revmpath+'CAISO/io/caiso-node-latlon.csv',
'ERCOT': revmpath+'ERCOT/io/ercot-node-latlon.csv',
#'MISO': revmpath+'MISO/in/miso-node-map.csv',
'MISO': revmpath+'MISO/io/miso-node-latlon.csv',
# 'PJM': revmpath+'PJM/io/pjm-pnode-latlon-uniquepoints.csv',
'PJM': revmpath+'PJM/io/pjm-node-latlon.csv',
'NYISO': revmpath+'NYISO/io/nyiso-node-latlon.csv',
'ISONE': revmpath+'ISONE/io/isone-node-latlon.csv'
}
if nodesfile is None:
nodesfile = nodesfiles[iso]
resolution = {
'CAISO': {'da': 60, 'rt': 5}, 'ERCOT': {'da': 60, 'rt': 5},
'MISO': {'da': 60, 'rt': 60}, 'PJM': {'da': 60, 'rt': 60},
'NYISO': {'da': 60, 'rt': 5}, 'ISONE': {'da': 60, 'rt': 60},
}
### Get file list and iso/market info
# files = glob('{}{}*'.format(filepathin, year))
files = sorted(glob('{}{}*'.format(filepathin, year)))
print('head(files):')
for file in files[:3]:
print(file)
print('tail(files):')
for file in files[-3:]:
print(file)
timezone = pvvm.toolbox.tz_iso[iso]
res = resolution[iso][market]
### Make the inputs easier to work with
iso = iso.upper()
hours = pvvm.toolbox.yearhours(year)
dates = pvvm.toolbox.makedays(year)
    ### TODO: figure out how to generalize this
# if len(files) != len(dates):
# print('len(files) = {}'.format(len(files)))
# print('len(dates) = {}'.format(len(dates)))
# raise Exception("files and dates don't match")
if iso == 'ISONE':
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True,
names=['Node'], skiprows=1)
### Load daily files
colnames = ['intime', 'node', 'lmp']
dfdict = {}
for i in trange(len(files)):
dfday = pd.read_csv(
files[i], skiprows=6, usecols=[2,4,6], names=colnames,
dtype={'intime':str, 'node':'category', 'lmp':float})
dfday.drop(dfday.index[-1], inplace=True)
dfday.loc[:,'intime'] = dates[i] + 'H' + dfday.loc[:,'intime']
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
dfall = pd.concat(dfdict)
### Make new index
oldtime = list(dfall.intime.unique())
newtime = list(pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year)))
for i in range(len(newtime)):
newtime[i] = str(newtime[i])
indexconvert = dict(zip(oldtime, newtime))
dfall.loc[:,'intime'] = dfall.loc[:,'intime'].apply(
lambda x: indexconvert[x])
dfall.loc[:,'intime'] = pd.to_datetime(dfall['intime'])
fullindex = pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year))
fullindex = fullindex.tz_localize(timezone)
fullindex = pd.DataFrame(index=fullindex)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
for j in trange(len(nodesin)):
try:
df = dfall[dfall['node'] == nodesin[j]][['intime','lmp']].copy()
df.index = df['intime'].values
del df['intime']
df.index = df.index.tz_localize(timezone)
df = df.merge(fullindex, how='right', left_index=True, right_index=True)
numhours = hours - len(df[df['lmp'].isnull()])
datalength.append([nodesin[j], numhours])
df.to_csv('{}{}-{}.gz'.format(filepathout, nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif iso == 'MISO':
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True, names=['Node'])
### Pick columns from input file
usecols = [0, 2,
3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,
15,16,17,18,19,20,21,22,23,24,25,26]
### Load daily files
dfdict = {}
for i in trange(len(files)):
colnames = ['Node', 'Value']
for j in range(24):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=5, header=None,
usecols=usecols,
dtype={0: 'category'}, names=colnames)
dfday = dfin.loc[dfin['Value'] == 'LMP'].T.copy()
dfday.columns = dfday.iloc[0,:]
dfday = dfday.drop(dfday.index[[0,1]])
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
dfall = pd.concat(dfdict)
dfall.index = dfall.index.droplevel(0)
dfall.index = pd.date_range(dates[0], periods=hours, freq='H')
dfall.index = dfall.index.tz_localize(timezone)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
for j in trange(len(nodesin)):
try:
df = pd.DataFrame(dfall.loc[:,nodesin[j]])
numhours = hours - len(df[df[nodesin[j]].isnull()])
datalength.append([nodesin[j], numhours])
df.to_csv('{}{}-{}.gz'.format(filepathout, nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif iso == 'PJM':
### Set skiprows (different headers for 'da' and 'rt' markets)
skiprows = {'da': 8, 'rt': 18}
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True)
### Pick columns from input file
usecols = [1,
7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40,
43, 46, 49, 52, 55, 58, 61, 64, 67, 70, 73, 76]
usecols_dst_springforward = [1,
7, 10, 16, 19, 22, 25, 28, 31, 34, 37, 40,
43, 46, 49, 52, 55, 58, 61, 64, 67, 70, 73, 76]
usecols_dst_fallback = [1,
7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40,
43, 46, 49, 52, 55, 58, 61, 64, 67, 70, 73, 76, 79]
### Load daily files
dfdict = {}
for i in trange(len(files)):
colnames = ['PnodeID']
if dates[i] not in [pvvm.toolbox.dst_springforward[year], pvvm.toolbox.dst_fallback[year]]:
for j in range(24):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=skiprows[market], header=None,
usecols=usecols,
dtype={1: 'category'}, names=colnames)
elif dates[i] == pvvm.toolbox.dst_springforward[year]:
for j in range(23):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=skiprows[market], header=None,
usecols=usecols_dst_springforward,
dtype={1: 'category'}, names=colnames)
elif dates[i] == pvvm.toolbox.dst_fallback[year]:
for j in range(25):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=skiprows[market], header=None,
usecols=usecols_dst_fallback,
dtype={1: 'category'}, names=colnames)
dfday = dfin.T.copy()
dfday.columns = dfday.iloc[0,:]
dfday = dfday.drop(dfday.index[[0]])
del dfday[np.nan]
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
dfall = pd.concat(dfdict)
dfall.index = dfall.index.droplevel(0)
dfall.index = pd.date_range(dates[0], periods=hours, freq='H')
dfall.index = dfall.index.tz_localize(timezone)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
for j in trange(len(nodesin)):
try:
df = pd.DataFrame(dfall.loc[:,nodesin[j].astype(str)])
numhours = hours - len(df[df[nodesin[j].astype(str)].isnull()])
datalength.append([nodesin[j], numhours])
df.to_csv(filepathout + '{}-{}.gz'.format(nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif iso == 'NYISO':
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True, names=['node'], skiprows=1)
if market == 'da':
dates = pvvm.toolbox.makedays(year)
if len(files) != len(dates):
print('len(files) = {}'.format(len(files)))
print('len(dates) = {}'.format(len(dates)))
raise Exception("files and dates don't match")
### Make daylight savings mangler
def dstfallback(dataframe):
fallback = pvvm.toolbox.dst_fallback[year]
backfall = '{}/{}/{}'.format(fallback[4:6], fallback[6:], fallback[:4])
fallbackhalf = int(len(dataframe[dataframe['intime'] == backfall + ' 01:00'])/2)
if str(dataframe[dataframe['intime'] == backfall + ' 01:00'].iloc[0,1]) != \
str(dataframe[dataframe['intime'] == backfall + ' 01:00'].iloc[fallbackhalf,1]):
raise Exception("DST fallback ptid's don't match.")
mask = dataframe['intime'] == backfall + ' 01:00'
mask.iloc[fallbackhalf:2*fallbackhalf] = False
dataframe.loc[mask, 'intime'] = backfall + ' 01:00 DST'
print("DST fallback conversion worked!")
return dataframe
### Make datetime converter
def makeindexconvert(files, dates):
"""
"""
dicttimes = {}
for i in trange(len(files)):
df = pd.read_csv(files[i],
usecols = [0,2,3], skiprows=1,
names=['intime', 'node', 'lmp'],
dtype={'ptid': 'category', 'lmp': float})
if dates[i] == pvvm.toolbox.dst_fallback[year]:
# print(df.head())
df = dstfallback(df)
dicttimes[dates[i]] = df
dftimes = | pd.concat(dicttimes, copy=False) | pandas.concat |
#TODO: DISTINCT
from abc import abstractmethod
from numpy.lib.arraysetops import isin
from models.instructions.Expression.expression import *
from pandas.core.frame import DataFrame
from models.instructions.DML.special_functions import *
from models.nodo import Node
import pandas as pd
class Instruction:
    '''Abstract class'''
@abstractmethod
def process(self):
        ''' method for execution '''
pass
@abstractmethod
def compile(self):
        ''' method for compilation '''
pass
class From(Instruction):
'''
    FROM receives a table in which to look for the data
'''
def __init__(self, tables) :
self.tables = tables
if self.tables is None:
self.alias = None
else:
self.alias = f'{self.tables[0].alias}'
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
try:
tables = loop_list(self.tables,instrucction)
lista1 = []
lista2 = []
if isinstance(tables, DataFrame):
return [tables]
else:
lista_name_original = tables[0]
lista_alias = tables[1]
if len(tables) > 0:
for index, data in enumerate(lista_name_original):
data_frame = select_all(data, 0, 0, lista_alias[index])
lista1.append(data_frame)
lista2.append(lista_alias[index])
if len(lista1) > 1:
cross_join = self.union_tables(lista1)
storage_columns(cross_join.values.tolist(), cross_join.columns.tolist(), 0, 0)
for data in lista2:
storage_table(cross_join.values.tolist(), cross_join.columns.tolist(), data, 0, 0)
return [cross_join, lista2[0]]
else:
return [lista1[0], lista2[0]]
except:
desc = "FATAL ERROR, murio en From, F"
ErrorController().add(34, 'Execution', desc, 0, 0)
def union_tables(self, right: list):
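        # Cross join: tag every frame with a constant 'key' column, then merge
        # successively on that key so each row pairs with every row of the next frame.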
if len(right) < 1:
return
for index in right:
index['key'] = 1
left = right[0]
for index, _ in enumerate(right):
if index == len(right)-1:
break
else:
left = | pd.merge(left, right[index+1], on=['key']) | pandas.merge |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# moved from habitat_baselines due to dependency issues
import os
from typing import Any, Dict, List, Optional
import matplotlib.pyplot as plt
import numpy as np
import torch
from habitat.core.utils import try_cv2_import
from habitat.utils.visualizations.utils import images_to_video
from mpl_toolkits.axes_grid1 import ImageGrid
from torch.utils.tensorboard import SummaryWriter
import pandas as pd
import seaborn as sns
from habitat.utils.visualizations import maps
from PIL import Image
from src.constants.constants import NumericalMetrics
from habitat.utils.visualizations.utils import draw_collision
cv2 = try_cv2_import()
class TensorboardWriter:
def __init__(self, log_dir: str, *args: Any, **kwargs: Any):
r"""A Wrapper for tensorboard SummaryWriter. It creates a dummy writer
when log_dir is empty string or None. It also has functionality that
generates tb video directly from numpy images.
Args:
log_dir: Save directory location. Will not write to disk if
log_dir is an empty string.
*args: Additional positional args for SummaryWriter
**kwargs: Additional keyword args for SummaryWriter
"""
self.writer = None
if log_dir is not None and len(log_dir) > 0:
self.writer = SummaryWriter(log_dir, *args, **kwargs)
def __getattr__(self, item):
if self.writer:
return self.writer.__getattribute__(item)
else:
return lambda *args, **kwargs: None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.writer:
self.writer.close()
def add_video_from_np_images(
self, video_name: str, step_idx: int, images: np.ndarray, fps: int = 10
) -> None:
r"""Write video into tensorboard from images frames.
Args:
video_name: name of video string.
step_idx: int of checkpoint index to be displayed.
            images: list of n frames. Each frame is an np.ndarray of shape (H, W, 3).
fps: frame per second for output video.
Returns:
None.
"""
if not self.writer:
return
# initial shape of np.ndarray list: N * (H, W, 3)
frame_tensors = [torch.from_numpy(np_arr).unsqueeze(0) for np_arr in images]
video_tensor = torch.cat(tuple(frame_tensors))
video_tensor = video_tensor.permute(0, 3, 1, 2).unsqueeze(0)
# final shape of video tensor: (1, n, 3, H, W)
self.writer.add_video(video_name, video_tensor, fps=fps, global_step=step_idx)
def generate_video(
video_option: List[str],
video_dir: Optional[str],
images: List[np.ndarray],
episode_id: int,
scene_id: int,
agent_seed: int,
checkpoint_idx: int,
metrics: Dict[str, float],
tb_writer: TensorboardWriter,
fps: int = 10,
) -> None:
r"""Generate video according to specified information.
Args:
video_option: string list of "tensorboard" or "disk" or both.
video_dir: path to target video directory.
images: list of images to be converted to video.
episode_id: episode id for video naming.
scene_id: scene id for video naming.
agent_seed: agent initialization seed for video naming.
checkpoint_idx: checkpoint index for video naming.
        metrics: dict mapping metric names (e.g. "spl") to their values.
tb_writer: tensorboard writer object for uploading video.
fps: fps for generated video.
Returns:
None
"""
if len(images) < 1:
return
metric_strs = []
for k, v in metrics.items():
metric_strs.append(f"{k}={v:.2f}")
scene_id = os.path.basename(scene_id)
video_name = (
f"episode={episode_id}-scene={scene_id}-seed={agent_seed}-ckpt={checkpoint_idx}-"
+ "-".join(metric_strs)
)
if "disk" in video_option:
assert video_dir is not None
images_to_video(images, video_dir, video_name)
if "tensorboard" in video_option:
tb_writer.add_video_from_np_images(
f"episode{episode_id}", checkpoint_idx, images, fps=fps
)
def generate_grid_of_maps(episode_id, scene_id, seeds, maps, map_dir):
"""
Paste top-down-maps from agent initialized with the given seeds to a grid
image. Save the grid image to <map_dir>/episode=<episode_id>-scene=<scene_id>.png.
Code modified based on tutorial from
https://matplotlib.org/stable/gallery/axes_grid1/simple_axesgrid.html
Args:
episode_id: episode's ID.
scene_id: scene ID of the episode, starts with data/dataset/...
seeds: seeds used to initialize the agents.
maps: maps produced by the agents navigating in np.ndarray format. Should be in
the same order as seeds.
map_dir: directory to store the map
"""
fig = plt.figure(figsize=(16.0, 4.0))
grid = ImageGrid(
fig,
111, # similar to subplot(111)
        nrows_ncols=(2, int(np.ceil(len(seeds) / 2))),  # creates a 2 x ceil(n/2) grid of axes
axes_pad=0.4, # pad between axes in inch.
)
for ax, im, seed in zip(grid, maps, seeds):
# iterating over the grid to return the axes
ax.set_title(f"Seed={seed}", fontdict=None, loc="center", color="k")
ax.imshow(im)
fig.savefig(
f"{map_dir}/episode={episode_id}-scene={os.path.basename(scene_id)}.png"
)
plt.close(fig)
def colorize_and_fit_to_height(top_down_map_raw: np.ndarray, output_height: int):
r"""Given the output of the TopDownMap measure, colorizes the map,
and fits to a desired output height. Modified on the basis of
maps.colorize_draw_agent_and_fit_to_height from habitat-lab
:param top_down_map_raw: raw top-down map
:param output_height: The desired output height
"""
top_down_map = maps.colorize_topdown_map(top_down_map_raw, None)
if top_down_map.shape[0] > top_down_map.shape[1]:
top_down_map = np.rot90(top_down_map, 1)
# scale top down map to align with rgb view
old_h, old_w, _ = top_down_map.shape
top_down_height = output_height
top_down_width = int(float(top_down_height) / old_h * old_w)
# cv2 resize (dsize is width first)
top_down_map = cv2.resize(
top_down_map,
(top_down_width, top_down_height),
interpolation=cv2.INTER_CUBIC,
)
return top_down_map
def save_blank_map(episode_id: str, scene_id: str, blank_map: np.ndarray, map_dir: str):
r"""
Save the given blank map in .pgm format in <map_dir>/
:param episode_id: episode ID
:param scene_id: scene ID
:param blank_map: blank top-down map of the specified episode
:param map_dir: directory to save the map
"""
map_img = Image.fromarray(blank_map, "RGB")
map_img.save(
f"{map_dir}/blank_map-episode={episode_id}-scene={os.path.basename(scene_id)}.pgm"
)
def resolve_metric_unit(metric_name):
r"""
Return a string of the unit of the given metric.
:param metric_name: name of the metric
:return: a unit string
"""
if metric_name == NumericalMetrics.DISTANCE_TO_GOAL:
return "(meters)"
elif (
metric_name == NumericalMetrics.SUCCESS
or metric_name == NumericalMetrics.SPL
or metric_name == NumericalMetrics.NUM_STEPS):
return ""
elif (
metric_name == NumericalMetrics.SIM_TIME
or metric_name == NumericalMetrics.RESET_TIME
or metric_name == NumericalMetrics.AGENT_TIME):
return "(seconds)"
def visualize_variability_due_to_seed_with_box_plots(
metrics_list: List[Dict[str, Dict[str, float]]],
seeds: List[int],
plot_dir: str,
):
r"""
Generate box plots from metrics and seeds. Requires same metrics collected
from all seeds. Save the plots to <plot_dir>/<metric_name>-<n>_seeds.png,
where <metric_name> is for eg. "spl", <n> is the number of seeds.
Args:
metrics_list: list of metrics collected from experiment run with the
given seeds.
seeds: seeds to initialize agents. Should be in the same order as
metrics_list.
plot_dir: directory to save the box plot.
"""
# check if we have metrics from all seeds
num_seeds = len(seeds)
assert len(metrics_list) == num_seeds
# return if no data
if num_seeds == 0:
return
num_samples_per_seed = len(metrics_list[0])
if num_samples_per_seed == 0:
return
# check if all seeds have the same number of data points
# for i in range(num_seeds):
# assert len(metrics_list[i]) == num_samples_per_seed
# extract metric names
metric_names = []
for _, episode_metrics in metrics_list[0].items():
for metric_name, _ in episode_metrics.items():
metric_names.append(metric_name)
break
# build dataframe
data = {}
total_num_samples = num_samples_per_seed * num_seeds
data["seed"] = np.ndarray((total_num_samples,))
for metric_name in metric_names:
data[metric_name] = np.ndarray((total_num_samples,))
# populate each array
total_sample_count = 0
for seed_index in range(num_seeds):
for _, episode_metrics in metrics_list[seed_index].items():
# register a new sample
data["seed"][total_sample_count] = seeds[seed_index]
for metric_name in metric_names:
data[metric_name][total_sample_count] = episode_metrics[metric_name]
total_sample_count += 1
df = pd.DataFrame(data)
# drop invalid samples
# code adapted from piRSquared's work on
# https://stackoverflow.com/questions/45745085/python-pandas-how-to-remove-nan-and-inf-values
    df = df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)]
# create box-and-strip plot for each metric
for metric_name in metric_names:
fig = plt.figure(figsize=(12.8, 9.6))
ax = fig.add_subplot(111)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
sns.boxplot(x="seed", y=metric_name, data=df, ax=ax)
sns.stripplot(x="seed", y=metric_name, data=df, color=".25", size=2, ax=ax)
fig.subplots_adjust(bottom=0.15)
fig.savefig(f"{plot_dir}/{metric_name}-{num_seeds}_seeds.png")
plt.close(fig)
def visualize_metrics_across_configs_with_box_plots(
metrics_list: List[Dict[str, Dict[str, float]]],
config_names: List[str],
configs_or_seeds: str,
plot_dir: str,
):
r"""
Generate box plots from metrics and experiment configurations. Requires same
metrics collected across all configs. Save the plots to
<plot_dir>/<metric_name>.png, where <metric_name> is for eg. "agent_time".
Args:
metrics_list: list of metrics collected from experiment run with the
given seeds.
config_names: names of experiment configurations. Should be in the same
order as metrics_list.
configs_or_seeds: if visualizing across configs or seeds. Can only be
"configurations" or "seeds"
plot_dir: directory to save the box plot.
"""
# check configs_or_seeds
assert configs_or_seeds in ["configurations", "seeds"]
# check if we have metrics from all configs
num_configs = len(config_names)
assert len(metrics_list) == num_configs
# return if no data
if num_configs == 0:
return
num_samples_per_config = len(metrics_list[0])
if num_samples_per_config == 0:
return
# check if all configs have the same number of data points
# for i in range(num_configs):
# assert len(metrics_list[i]) == num_samples_per_config
# extract metric names
metric_names = []
for _, episode_metrics in metrics_list[0].items():
for metric_name, _ in episode_metrics.items():
metric_names.append(metric_name)
break
# build dataframe
data = {}
total_num_samples = num_samples_per_config * num_configs
data[configs_or_seeds] = np.ndarray((total_num_samples,), dtype=object)
for metric_name in metric_names:
data[metric_name] = np.ndarray((total_num_samples,))
# populate each array
total_sample_count = 0
for config_index in range(num_configs):
for _, episode_metrics in metrics_list[config_index].items():
# register a new sample
data[configs_or_seeds][total_sample_count] = config_names[config_index]
for metric_name in metric_names:
data[metric_name][total_sample_count] = episode_metrics[metric_name]
total_sample_count += 1
df = pd.DataFrame(data)
# drop invalid samples
# code adapted from piRSquared's work on
# https://stackoverflow.com/questions/45745085/python-pandas-how-to-remove-nan-and-inf-values
    df = df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)]
# create box-and-strip plot for each metric
for metric_name in metric_names:
fig = plt.figure(figsize=(12.8, 9.6))
ax = fig.add_subplot(111)
sns.set(font_scale=1.2, style="white")
sns.boxplot(x=configs_or_seeds, y=metric_name, data=df, ax=ax)
sns.stripplot(x=configs_or_seeds, y=metric_name, data=df, color=".25", size=2, ax=ax)
ax.set_xlabel(f"{configs_or_seeds}", fontsize=22)
if configs_or_seeds == "seeds":
plt.xticks(rotation=90, ha="right")
plt.subplots_adjust(bottom=0.2)
else:
plt.xticks(rotation=0)
plt.subplots_adjust(bottom=0.1)
ax.set_ylabel(f"{metric_name.value} {resolve_metric_unit(metric_name)}", fontsize=22)
#fig.suptitle(f"{metric_name.value} across {num_configs} {configs_or_seeds}")
fig.savefig(f"{plot_dir}/{metric_name}-{num_configs}_{configs_or_seeds}.png")
plt.close(fig)
def visualize_success_across_configs_with_pie_charts(
metrics_list: List[Dict[str, Dict[str, float]]],
config_names: List[str],
configs_or_seeds: str,
plot_dir: str,
):
r"""
Generate pie charts to show success counts across experiment configurations. Require
the success metric collected across all configs. Save the plot to <plot_dir>/success.png.
Args:
metrics_list: list of metrics collected from experiment run with the
given seeds.
config_names: names of experiment configurations. Should be in the same
order as metrics_list.
configs_or_seeds: if visualizing across configs or seeds. Can only be
"configurations" or "seeds"
plot_dir: directory to save the pie chart.
"""
# check configs_or_seeds
assert configs_or_seeds in ["configurations", "seeds"]
# check if we have metrics from all configs
num_configs = len(config_names)
assert len(metrics_list) == num_configs
# return if no data
if num_configs == 0:
return
num_samples_per_config = len(metrics_list[0])
if num_samples_per_config == 0:
return
# check if all configs have the same number of data points
# for i in range(num_configs):
# assert len(metrics_list[i]) == num_samples_per_config
# build data for plotting
success_counts = []
for config_index in range(0, num_configs):
# count success in this config
dict_of_metrics = metrics_list[config_index]
count = 0
for _, per_episode_metrics in dict_of_metrics.items():
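            # NOTE: SPL is summed here as a success proxy (SPL is 0 for failed episodes and
            # positive otherwise); an exact count would presumably use NumericalMetrics.SUCCESS.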
count += per_episode_metrics[NumericalMetrics.SPL]
success_counts.append(count)
dict_of_success_counts = {}
for config_index in range(0, num_configs):
dict_of_success_counts[config_names[config_index]] = [
success_counts[config_index],
num_samples_per_config - success_counts[config_index],
]
# create pie plots for all configs
fig, axes = plt.subplots(
int(np.ceil(num_configs/2)),
2,
sharey=True,
figsize=(9.6, 12.8))
axes_flattened = axes.ravel()
for config_index in range(0, num_configs):
config_name = config_names[config_index]
data_per_config = dict_of_success_counts[config_name]
axes_flattened[config_index].pie(data_per_config,
labels=["success", "fail"],
autopct='%1.1f%%',
shadow=False,
startangle=90)
axes_flattened[config_index].set_title(config_name)
#fig.suptitle(f"proportion of succeeded/failed episodes across {num_configs} {configs_or_seeds}")
fig.tight_layout()
fig.savefig(f"{plot_dir}/success-{num_configs}_{configs_or_seeds}.png")
plt.close(fig)
def visualize_metrics_across_configs_with_histograms(
metrics_list: List[Dict[str, Dict[str, float]]],
config_names: List[str],
configs_or_seeds: str,
plot_dir: str,
):
r"""
Generate histograms from metrics and experiment configurations. Requires same
metrics collected across all configs. Save the plots to
<plot_dir>/<metric_name>.png, where <metric_name> is for eg. "spl".
Args:
metrics_list: list of metrics collected from experiment run with the
given seeds.
config_names: names of experiment configurations. Should be in the same
order as metrics_list.
configs_or_seeds: if visualizing across configs or seeds. Can only be
"configurations" or "seeds"
plot_dir: directory to save the histograms.
"""
# check configs_or_seeds
assert configs_or_seeds in ["configurations", "seeds"]
# check if we have metrics from all configs
num_configs = len(config_names)
assert len(metrics_list) == num_configs
# return if no data
if num_configs == 0:
return
num_samples_per_config = len(metrics_list[0])
if num_samples_per_config == 0:
return
# check if all configs have the same number of data points
# for i in range(num_configs):
# assert len(metrics_list[i]) == num_samples_per_config
# extract metric names
metric_names = []
for _, episode_metrics in metrics_list[0].items():
for metric_name, _ in episode_metrics.items():
metric_names.append(metric_name)
break
# build data for plotting
data_all_configs_all_metrics = {} # Dict[Dict[List[float]]]
for config_index in range(0, num_configs):
data_per_config_all_metrics = {} # Dict[List[float]]
metrics_per_config = metrics_list[config_index]
for metric_name in metric_names:
data_per_config_per_metric = []
for _, dict_of_metrics in metrics_per_config.items():
if (
not np.isnan(dict_of_metrics[metric_name])
and not np.isinf(dict_of_metrics[metric_name])
):
data_per_config_per_metric.append(dict_of_metrics[metric_name])
data_per_config_all_metrics[metric_name] = data_per_config_per_metric
data_all_configs_all_metrics[config_names[config_index]] = data_per_config_all_metrics
# plot histograms
for metric_name in metric_names:
fig, axes = plt.subplots(
int(np.ceil(num_configs/2)),
2,
sharey=True,
figsize=(12.8, 9.6))
axes_flattened = axes.ravel()
for config_index in range(0, num_configs):
config_name = config_names[config_index]
# create histogram per metric, per config
data_to_plot = data_all_configs_all_metrics[config_name][metric_name]
if (
metric_name == NumericalMetrics.DISTANCE_TO_GOAL
                or metric_name == NumericalMetrics.SPL
or metric_name == NumericalMetrics.NUM_STEPS
):
axes_flattened[config_index].hist(
data_to_plot,
bins=50
)
else:
raise NotImplementedError
axes_flattened[config_index].set_title(config_name, fontsize=18)
# set common x and y label. Code adapted from
# https://stackoverflow.com/questions/16150819/common-xlabel-ylabel-for-matplotlib-subplots
fig.text(0.5, 0.04, f"{metric_name.value} {resolve_metric_unit(metric_name)}", ha="center", fontsize=22)
fig.text(0.04, 0.5, "number of episodes", va="center", rotation="vertical", fontsize=22)
plt.xticks(rotation=0)
plt.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=0.9, wspace=0.2, hspace=0.6)
fig.savefig(f"{plot_dir}/{metric_name}-{num_configs}_{configs_or_seeds}.png")
plt.close(fig)
def observations_to_image_for_roam(
observation: Dict,
info: Dict,
max_depth: float,
) -> np.ndarray:
r"""Generate image of single frame from observation and info
returned from a single environment step(). Modified upon
habitat.utils.visualizations.observations_to_image().
Args:
observation: observation returned from an environment step().
info: info returned from an environment step().
max_depth: max depth reading of the depth sensor.
Returns:
generated image of a single frame.
"""
egocentric_view_l: List[np.ndarray] = []
if "rgb" in observation:
rgb = observation["rgb"]
if not isinstance(rgb, np.ndarray):
rgb = rgb.cpu().numpy()
egocentric_view_l.append(rgb)
# draw depth map if observation has depth info
if "depth" in observation:
depth_map = observation["depth"].squeeze() * (255.0 / max_depth)
if not isinstance(depth_map, np.ndarray):
depth_map = depth_map.cpu().numpy()
depth_map = depth_map.astype(np.uint8)
depth_map = np.stack([depth_map for _ in range(3)], axis=2)
egocentric_view_l.append(depth_map)
# add image goal if observation has image_goal info
if "imagegoal" in observation:
rgb = observation["imagegoal"]
if not isinstance(rgb, np.ndarray):
rgb = rgb.cpu().numpy()
egocentric_view_l.append(rgb)
assert len(egocentric_view_l) > 0, "Expected at least one visual sensor enabled."
egocentric_view = np.concatenate(egocentric_view_l, axis=1)
# draw collision
if "collisions" in info and info["collisions"]["is_collision"]:
egocentric_view = draw_collision(egocentric_view)
frame = egocentric_view
if "top_down_map_for_roam" in info:
top_down_map = maps.colorize_draw_agent_and_fit_to_height(
info["top_down_map_for_roam"], egocentric_view.shape[0]
)
frame = np.concatenate((egocentric_view, top_down_map), axis=1)
return frame
def visualize_running_times_with_bar_plots(
running_times: List[float], config_names: List[str], plot_dir: str
):
r"""
Visualize running times from multiple experiments as bar charts. Save the
plot to <plot_dir>/running_time_across_configs.png.
:param running_times: running times from a sequence of experiments.
:param config_names: names of experiment configs, should be in same order
as `running_times`
:param plot_dir: directory to save the plot
"""
# precondition check
assert len(running_times) == len(config_names)
fig = plt.figure(figsize=(12.8, 9.6))
data = {}
data["config"] = config_names
data["running_time"] = running_times
df = | pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/python3.9
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 LinYulong. All Rights Reserved
#
# @Time : 2021/10/9 23:10
# @Author : LinYulong
import numpy
import pandas
from src.alg import cross_verify, math_helper
from src.excel import excel_helper
from src.train import train_cfg
from train import train
def merge_chd(df: pandas.DataFrame) -> pandas.DataFrame:
"""
    Merge the table columns into groups (taking the mean of the items in each group).
    Note: the sklearn algorithms require non-float input, so values are uniformly
    multiplied by the precision factor and then truncated to integers.
:param df:
:return:
"""
line_size = df.index.size
np_zero = numpy.zeros(shape=(line_size, 5), dtype=int)
ret = pandas.DataFrame(np_zero, columns=['G1', 'G2', 'G3', 'G4', 'G5'], dtype=int)
ret["G1"] = (df["CHD1"] + df["CHD2"] + df["CHD9"]) / 3
ret["G2"] = (df["CHD3"] + df["CHD4"] + df["CHD5"] + df["CHD6"] + df["CHD7"]) / 5
ret["G3"] = df["CHD11"]
ret["G4"] = (df["CHD8"] + df["CHD10"]) / 2
ret["G5"] = (df["CHD12"] + df["CHD13"] + df["CHD14"]) / 3
ret = ret * train_cfg.get_times()
    ret = pandas.DataFrame(ret, dtype=int)  # truncate (not round) to integers
return ret
def merge_chd2(df: pandas.DataFrame) -> pandas.DataFrame:
"""
    Merge the table columns into groups, split out by importance.
    Note: the sklearn algorithms require non-float input, so values are uniformly
    multiplied by the precision factor and then truncated to integers.
:param df:
:return:
"""
line_size = df.index.size
np_zero = numpy.zeros(shape=(line_size, 2), dtype=int)
ret = pandas.DataFrame(np_zero, columns=['G1', 'G2'], dtype=int)
ret["G1"] = df["CHD8"]
ret["G2"] = df["CHD10"]
ret = ret * train_cfg.get_times()
    ret = pandas.DataFrame(ret, dtype=int)  # truncate (not round) to integers
return ret
def merge_chd3(df: pandas.DataFrame) -> pandas.DataFrame:
"""
    Merge the table columns into groups, split out by importance.
    Note: the sklearn algorithms require non-float input, so values are uniformly
    multiplied by the precision factor and then truncated to integers.
:param df:
:return:
"""
line_size = df.index.size
np_zero = numpy.zeros(shape=(line_size, 3), dtype=int)
ret = pandas.DataFrame(np_zero, columns=['G1', 'G2', 'G3'], dtype=int)
ret["G1"] = df["CHD12"]
ret["G2"] = df["CHD13"]
ret["G3"] = df["CHD14"]
ret = ret * train_cfg.get_times()
ret = | pandas.DataFrame(ret, dtype=int) | pandas.DataFrame |
import datetime
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocessing import Pool
from itertools import zip_longest
from os.path import isfile, join
import cProfile
import io
import pstats
import fitdecode
from utils import log, list_all_files
def profile(func):
def wrapper(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
retval = func(*args, **kwargs)
pr.disable()
s = io.StringIO()
sortby = pstats.SortKey.CUMULATIVE # 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
return retval
return wrapper
def fit_is_running(fit_file_name):
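    # Look for a 'sport' field in the FIT frames and report whether the activity is a run;
    # any decoding error is treated as "not running" so the file gets skipped.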
try:
with fitdecode.FitReader(fit_file_name) as fit:
for frame in fit:
if (isinstance(frame, fitdecode.FitDataMessage) and
frame.name in ['sport', 'workout', 'lap', 'session']):
if frame.has_field('sport'):
if frame.get_field('sport').value != 'running':
return False
else:
return True
except:
return False
return False
def add_delta_col(df, orig_col, rolled_col, delta_col, is_time=False):
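    # Shift orig_col down by one row into rolled_col, then store the row-to-row difference
    # in delta_col (expressed in seconds when is_time is True).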
df[rolled_col] = df[orig_col]
df.iloc[-1, df.columns.get_loc(rolled_col)] = np.nan
df[rolled_col] = np.roll(df[rolled_col], 1)
df = df.fillna(method='bfill')
if is_time:
df[delta_col] = df.apply(lambda x: (x[orig_col] - x[rolled_col]).total_seconds(), axis=1)
else:
df[delta_col] = df.apply(lambda x: (x[orig_col] - x[rolled_col]), axis=1)
return df
# @profile
def extract_points(fit_file_name):
points = []
if not fit_is_running(fit_file_name):
return None
with fitdecode.FitReader(fit_file_name) as fit:
for frame in fit:
if isinstance(frame, fitdecode.FitDataMessage) and frame.name == 'record':
fields_read = ['timestamp', 'distance', 'heart_rate', 'enhanced_speed',
'enhanced_altitude']
optional_fields = ['heart_rate', 'enhanced_altitude']
current_point = []
for f in fields_read:
if frame.has_field(f):
current_point.append(frame.get_field(f).value)
else:
if f in optional_fields:
current_point.append(0)
else:
log("File %s from %s is missing field %s. It has fields: %s" %
(fit_file_name, frame.get_field('timestamp').value, f,
", ".join([field.name for field in frame.fields])))
return None
points.append(current_point)
if len(points) == 0:
return None
df = pd.DataFrame(points, columns=['time', 'distance', 'heart_rate', 'speed', 'altitude'])
df = df.sort_values(by=['time'])
df = df.reset_index()
df = df.fillna(method='ffill')
df = df.fillna(method='bfill')
df['time'] = pd.to_datetime(df['time'], utc=True)
df['time'] = df['time'].dt.tz_localize(tz=None)
df = add_delta_col(df, 'time', 'time-start', 'time_delta', True)
df = add_delta_col(df, 'distance', 'distance-start', 'distance_delta')
df['distance_cumsum'] = df['distance_delta'].cumsum()
df['time_cumsum'] = df['time_delta'].cumsum()
return df
# @profile
def get_best_section(fit_file, df, section):
# If the total distance of the workout is smaller then the section
# we're looking for we can skip this iteration.
if df['distance_delta'].sum() < section:
return None
column_names = [
'date', 'section', 'filename', 'time', 'distance', 'minutes_per_kilometer',
'total_distance', 'total_time']
section_list = []
date = df['time'].min()
total_distance = df['distance_delta'].sum()
total_time = df['time_delta'].sum()
df_distance_cumsum = df['distance_cumsum']
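    # Sliding-window search: for each starting sample, find the first sample at least
    # `section` metres further along and compute the pace over that window; the fastest
    # window is picked below via idxmin on minutes_per_kilometer.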
for i in range(len(df.index)):
curr_row = df.loc[i]
distance_cumsum = curr_row['distance_cumsum']
time_cumsum = curr_row['time_cumsum']
df_section = df[(df_distance_cumsum - distance_cumsum) >= section]
        if len(df_section.index) != 0:
time = df_section['time_cumsum'].iat[0] - time_cumsum
distance_i = df_section['distance_cumsum'].iat[0] - distance_cumsum
minutes_per_kilometer = (time/60)/(distance_i/1000)
section_list.append([
date, section, fit_file, time, distance_i, minutes_per_kilometer,
total_distance, total_time])
df_output = pd.DataFrame(section_list, columns=column_names)
return df_output.loc[df_output['minutes_per_kilometer'].idxmin()]
def process_file(input_folder, output_folder, invalid_folder, fit_file, sections):
if fit_file is None:
return
df_final = | pd.DataFrame(columns=['time', 'distance', 'minutes_per_kilometer']) | pandas.DataFrame |
from preppy524 import datatype
import pandas as pd
import pytest
test_dict = {'cat1': ['apple', None, 'pear', 'banana', 'blueberry', 'lemon'],
'num1': [0, 1, 2, 3, 4, 5],
'cat2': [True, False, False, True, False, None],
'num2': [0, 16, 7, None, 10, 14],
'num3': [0.5, 3, 3.9, 5.5, 100.2, 33]}
test_data = | pd.DataFrame(test_dict) | pandas.DataFrame |
import pandas as pd
import numpy as np
from ionsrcopt import source_stability as stab
class TestSourceStability:
def test_stability_mean_variance_classification(self):
def timedelta_to_seconds(timedelta):
if not pd.isnull(timedelta):
return timedelta.total_seconds()
else:
return np.nan
values = 10000 * [1]
timestamps = range(0, (int)(1e13), (int)(1e9))
df = pd.DataFrame()
df["Timestamp"] = timestamps
df["value"] = values
df["Timestamp"] = pd.to_datetime(df["Timestamp"])
df = df.set_index( | pd.DatetimeIndex(df["Timestamp"]) | pandas.DatetimeIndex |
"""This file is the pipeline for data etl"""
# import relation package.
import pickle
import pandas as pd
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
# import project package.
from config.config_setting import ConfigSetting
class DataEtlService:
def __init__(self):
config_setting = ConfigSetting()
self.config = config_setting.yaml_parser()
self.log = config_setting.set_logger(["data_etl_service"])
self.df = {}
self.train_label = []
self.one_hot_encoder = None
self.label_encoder = LabelEncoder()
def load_data(self):
# read train data
self.df['train'] = pd.read_csv(self.config['extract']['train_file'])
self.train_label = list(self.df['train']['SalePrice'])
# read test data
self.df['test'] = | pd.read_csv(self.config['extract']['test_file']) | pandas.read_csv |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: | pd.Timestamp("2012-08-25 00:00:00") | pandas.Timestamp |
import pandas as pd
from sktime.transformers.series_as_features.base import \
BaseSeriesAsFeaturesTransformer
from sktime.utils.data_container import tabularize
from sktime.utils.validation.series_as_features import check_X
__author__ = "<NAME>"
class PAA(BaseSeriesAsFeaturesTransformer):
""" (PAA) Piecewise Aggregate Approximation Transformer, as described in
<NAME>, <NAME>, <NAME>, and <NAME>.
Dimensionality reduction for fast similarity search in large time series
databases.
Knowledge and information Systems, 3(3), 263-286, 2001.
For each series reduce the dimensionality to num_intervals, where each
value is the mean of values in
the interval.
TO DO: pythonise it to make it more efficient. Maybe check vs this version
http://vigne.sh/posts/piecewise-aggregate-approx/
Could have: Tune the interval size in fit somehow?
Parameters
----------
num_intervals : int, dimension of the transformed data (default 8)
"""
def __init__(self,
num_intervals=8
):
self.num_intervals = num_intervals
super(PAA, self).__init__()
def set_num_intervals(self, n):
self.num_intervals = n
def transform(self, X, y=None):
"""
Parameters
----------
X : nested pandas DataFrame of shape [n_instances, 1]
Nested dataframe with univariate time-series in cells.
Returns
-------
dims: Pandas data frame with first dimension in column zero
"""
self.check_is_fitted()
X = check_X(X, enforce_univariate=True)
X = tabularize(X, return_array=True)
num_atts = X.shape[1]
num_insts = X.shape[0]
dims = pd.DataFrame()
data = []
for i in range(num_insts):
series = X[i, :]
frames = []
current_frame = 0
current_frame_size = 0
frame_length = num_atts / self.num_intervals
frame_sum = 0
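            # frame_length may be fractional, so a sample straddling a frame boundary is split
            # proportionally between the two frames (e.g. num_atts=10, num_intervals=4 gives
            # frame_length=2.5: the boundary sample counts half towards each neighbouring frame).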
for n in range(num_atts):
remaining = frame_length - current_frame_size
if remaining > 1:
frame_sum += series[n]
current_frame_size += 1
else:
frame_sum += remaining * series[n]
current_frame_size += remaining
if current_frame_size == frame_length:
frames.append(frame_sum / frame_length)
current_frame += 1
frame_sum = (1 - remaining) * series[n]
current_frame_size = (1 - remaining)
            # if the last frame was lost due to floating-point imprecision
if current_frame == self.num_intervals - 1:
frames.append(frame_sum / frame_length)
data.append( | pd.Series(frames) | pandas.Series |
import re
import sys
import matplotlib.pyplot as plt
import pandas as pd
from bld.project_paths import project_paths_join as ppj
def data_prep(data):
"""Function calculating yearly and monthly change
Args:
        data (pd.DataFrame): dataset with predicted values
    Returns:
        | monthly_change (pd.DataFrame): includes monthly change in mean prices
        | yearly_change (pd.DataFrame): includes change in mean prices compared
          to last year's same month
"""
mean_df = data.mean(axis=0)
# Rearranging the index
mean_df.index = | pd.to_datetime(mean_df.index, format="%Y_%m_%d") | pandas.to_datetime |
import os
import time
import random
import argparse
import numpy as np
import pandas as pd
import cv2
import PIL.Image
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
import torch
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from torch.optim.lr_scheduler import CosineAnnealingLR
from util import GradualWarmupSchedulerV2
import apex
from apex import amp
from dataset import get_df, get_transforms, MelanomaDataset
from models import Effnet_Melanoma, Resnest_Melanoma, Seresnext_Melanoma
from train import get_trans
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--kernel-type', type=str, required=True)
parser.add_argument('--data-dir', type=str, default='/raid/')
parser.add_argument('--data-folder', type=int, required=True)
parser.add_argument('--image-size', type=int, required=True)
parser.add_argument('--enet-type', type=str, required=True)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--num-workers', type=int, default=32)
parser.add_argument('--out-dim', type=int, default=9)
parser.add_argument('--use-amp', action='store_true')
parser.add_argument('--use-meta', action='store_true')
parser.add_argument('--DEBUG', action='store_true')
parser.add_argument('--model-dir', type=str, default='./weights')
parser.add_argument('--log-dir', type=str, default='./logs')
parser.add_argument('--oof-dir', type=str, default='./oofs')
parser.add_argument('--eval', type=str, choices=['best', 'best_20', 'final'], default="best")
parser.add_argument('--CUDA_VISIBLE_DEVICES', type=str, default='0')
parser.add_argument('--n-meta-dim', type=str, default='512,128')
args, _ = parser.parse_known_args()
return args
def val_epoch(model, loader, mel_idx, is_ext=None, n_test=1, get_output=False):
model.eval()
val_loss = []
LOGITS = []
PROBS = []
TARGETS = []
with torch.no_grad():
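        # Test-time augmentation: logits and softmax probabilities are averaged over n_test
        # deterministic transforms of each batch (get_trans appears to apply flips/transposes).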
for (data, target) in tqdm(loader):
if args.use_meta:
data, meta = data
data, meta, target = data.to(device), meta.to(device), target.to(device)
logits = torch.zeros((data.shape[0], args.out_dim)).to(device)
probs = torch.zeros((data.shape[0], args.out_dim)).to(device)
for I in range(n_test):
l = model(get_trans(data, I), meta)
logits += l
probs += l.softmax(1)
else:
data, target = data.to(device), target.to(device)
logits = torch.zeros((data.shape[0], args.out_dim)).to(device)
probs = torch.zeros((data.shape[0], args.out_dim)).to(device)
for I in range(n_test):
l = model(get_trans(data, I))
logits += l
probs += l.softmax(1)
logits /= n_test
probs /= n_test
LOGITS.append(logits.detach().cpu())
PROBS.append(probs.detach().cpu())
TARGETS.append(target.detach().cpu())
loss = criterion(logits, target)
val_loss.append(loss.detach().cpu().numpy())
val_loss = np.mean(val_loss)
LOGITS = torch.cat(LOGITS).numpy()
PROBS = torch.cat(PROBS).numpy()
TARGETS = torch.cat(TARGETS).numpy()
if get_output:
return LOGITS, PROBS
else:
acc = (PROBS.argmax(1) == TARGETS).mean() * 100.
auc = roc_auc_score((TARGETS == mel_idx).astype(float), PROBS[:, mel_idx])
auc_20 = roc_auc_score((TARGETS[is_ext == 0] == mel_idx).astype(float), PROBS[is_ext == 0, mel_idx])
return val_loss, acc, auc, auc_20
def main():
df, df_test, meta_features, n_meta_features, mel_idx = get_df(
args.kernel_type,
args.out_dim,
args.data_dir,
args.data_folder,
args.use_meta
)
transforms_train, transforms_val = get_transforms(args.image_size)
LOGITS = []
PROBS = []
dfs = []
for fold in range(5):
df_valid = df[df['fold'] == fold]
if args.DEBUG:
df_valid = pd.concat([
df_valid[df_valid['target'] == mel_idx].sample(args.batch_size * 3),
df_valid[df_valid['target'] != mel_idx].sample(args.batch_size * 3)
])
dataset_valid = MelanomaDataset(df_valid, 'valid', meta_features, transform=transforms_val)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=args.batch_size, num_workers=args.num_workers)
if args.eval == 'best':
model_file = os.path.join(args.model_dir, f'{args.kernel_type}_best_fold{fold}.pth')
elif args.eval == 'best_20':
model_file = os.path.join(args.model_dir, f'{args.kernel_type}_best_20_fold{fold}.pth')
if args.eval == 'final':
model_file = os.path.join(args.model_dir, f'{args.kernel_type}_final_fold{fold}.pth')
model = ModelClass(
args.enet_type,
n_meta_features=n_meta_features,
n_meta_dim=[int(nd) for nd in args.n_meta_dim.split(',')],
out_dim=args.out_dim
)
model = model.to(device)
try: # single GPU model_file
model.load_state_dict(torch.load(model_file), strict=True)
except: # multi GPU model_file
state_dict = torch.load(model_file)
state_dict = {k[7:] if k.startswith('module.') else k: state_dict[k] for k in state_dict.keys()}
model.load_state_dict(state_dict, strict=True)
if len(os.environ['CUDA_VISIBLE_DEVICES']) > 1:
model = torch.nn.DataParallel(model)
model.eval()
this_LOGITS, this_PROBS = val_epoch(model, valid_loader, mel_idx, is_ext=df_valid['is_ext'].values, n_test=8, get_output=True)
LOGITS.append(this_LOGITS)
PROBS.append(this_PROBS)
dfs.append(df_valid)
dfs = | pd.concat(dfs) | pandas.concat |
# ActivitySim
# See full license in LICENSE.txt.
import logging
import numpy as np
import pandas as pd
from activitysim.core import simulate
from activitysim.core import tracing
from activitysim.core import pipeline
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import expressions
from activitysim.abm.models.util.canonical_ids import set_trip_index
from activitysim.core.util import assign_in_place
from activitysim.core.util import reindex
from .util import estimation
logger = logging.getLogger(__name__)
@inject.injectable()
def stop_frequency_alts():
# alt file for building trips even though simulation is simple_simulate not interaction_simulate
file_path = config.config_file_path('stop_frequency_alternatives.csv')
df = pd.read_csv(file_path, comment='#')
df.set_index('alt', inplace=True)
return df
def process_trips(tours, stop_frequency_alts):
OUTBOUND_ALT = 'out'
assert OUTBOUND_ALT in stop_frequency_alts.columns
# get the actual alternatives for each person - have to go back to the
# stop_frequency_alts dataframe to get this - the stop_frequency choice
# column has the index values for the chosen alternative
trips = stop_frequency_alts.loc[tours.stop_frequency]
# assign tour ids to the index
trips.index = tours.index
"""
::
tours.stop_frequency => proto trips table
________________________________________________________
stop_frequency | out in
tour_id | tour_id
954910 1out_1in | 954910 1 1
985824 0out_1in | 985824 0 1
"""
# reformat with the columns given below
trips = trips.stack().reset_index()
trips.columns = ['tour_id', 'direction', 'trip_count']
# tours legs have one more leg than stop
trips.trip_count += 1
# prefer direction as boolean
trips['outbound'] = trips.direction == OUTBOUND_ALT
"""
tour_id direction trip_count outbound
0 954910 out 2 True
1 954910 in 2 False
2 985824 out 1 True
3 985824 in 2 False
"""
# now do a repeat and a take, so if you have two trips of given type you
# now have two rows, and zero trips yields zero rows
trips = trips.take(np.repeat(trips.index.values, trips.trip_count.values))
trips = trips.reset_index(drop=True)
grouped = trips.groupby(['tour_id', 'outbound'])
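    # number the trips within each (tour_id, direction) leg: 1..trip_count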
trips['trip_num'] = grouped.cumcount() + 1
trips['person_id'] = reindex(tours.person_id, trips.tour_id)
trips['household_id'] = reindex(tours.household_id, trips.tour_id)
trips['primary_purpose'] = reindex(tours.primary_purpose, trips.tour_id)
# reorder columns and drop 'direction'
trips = trips[['person_id', 'household_id', 'tour_id', 'primary_purpose',
'trip_num', 'outbound', 'trip_count']]
"""
person_id household_id tour_id primary_purpose trip_num outbound trip_count
0 32927 32927 954910 work 1 True 2
1 32927 32927 954910 work 2 True 2
2 32927 32927 954910 work 1 False 2
3 32927 32927 954910 work 2 False 2
4 33993 33993 985824 univ 1 True 1
5 33993 33993 985824 univ 1 False 2
6 33993 33993 985824 univ 2 False 2
"""
set_trip_index(trips)
return trips
@inject.step()
def stop_frequency(
tours, tours_merged,
stop_frequency_alts,
network_los,
chunk_size,
trace_hh_id):
"""
stop frequency model
For each tour, shoose a number of intermediate inbound stops and outbound stops.
Create a trip table with inbound and outbound trips.
Thus, a tour with stop_frequency '2out_0in' will have two outbound and zero inbound stops,
and four corresponding trips: three outbound, and one inbound.
    Adds a stop_frequency str column to tours, and creates a trips table with columns:
::
- person_id
- household_id
- tour_id
- primary_purpose
- atwork
- trip_num
- outbound
- trip_count
"""
trace_label = 'stop_frequency'
model_settings_file_name = 'stop_frequency.yaml'
model_settings = config.read_model_settings(model_settings_file_name)
tours = tours.to_frame()
tours_merged = tours_merged.to_frame()
assert not tours_merged.household_id.isnull().any()
assert not (tours_merged.origin == -1).any()
assert not (tours_merged.destination == -1).any()
nest_spec = config.get_logit_model_settings(model_settings)
constants = config.get_model_constants(model_settings)
# - run preprocessor to annotate tours_merged
preprocessor_settings = model_settings.get('preprocessor', None)
if preprocessor_settings:
# hack: preprocessor adds origin column in place if it does not exist already
assert 'origin' in tours_merged
assert 'destination' in tours_merged
od_skim_stack_wrapper = network_los.get_default_skim_dict().wrap('origin', 'destination')
skims = [od_skim_stack_wrapper]
locals_dict = {
"od_skims": od_skim_stack_wrapper,
'network_los': network_los
}
locals_dict.update(constants)
simulate.set_skim_wrapper_targets(tours_merged, skims)
# this should be pre-slice as some expressions may count tours by type
annotations = expressions.compute_columns(
df=tours_merged,
model_settings=preprocessor_settings,
locals_dict=locals_dict,
trace_label=trace_label)
assign_in_place(tours_merged, annotations)
tracing.print_summary('stop_frequency segments',
tours_merged.primary_purpose, value_counts=True)
spec_segments = model_settings.get('SPEC_SEGMENTS')
assert spec_segments is not None, f"SPEC_SEGMENTS setting not found in model settings: {model_settings_file_name}"
segment_col = model_settings.get('SEGMENT_COL')
assert segment_col is not None, f"SEGMENT_COL setting not found in model settings: {model_settings_file_name}"
nest_spec = config.get_logit_model_settings(model_settings)
choices_list = []
for segment_settings in spec_segments:
segment_name = segment_settings[segment_col]
segment_value = segment_settings[segment_col]
chooser_segment = tours_merged[tours_merged[segment_col] == segment_value]
if len(chooser_segment) == 0:
logging.info(f"{trace_label} skipping empty segment {segment_name}")
continue
logging.info(f"{trace_label} running segment {segment_name} with {chooser_segment.shape[0]} chooser rows")
estimator = estimation.manager.begin_estimation(model_name=segment_name, bundle_name='stop_frequency')
segment_spec = simulate.read_model_spec(file_name=segment_settings['SPEC'])
assert segment_spec is not None, "spec for segment_type %s not found" % segment_name
coefficients_file_name = segment_settings['COEFFICIENTS']
coefficients_df = simulate.read_model_coefficients(file_name=coefficients_file_name)
segment_spec = simulate.eval_coefficients(segment_spec, coefficients_df, estimator)
if estimator:
estimator.write_spec(segment_settings, bundle_directory=False)
estimator.write_model_settings(model_settings, model_settings_file_name, bundle_directory=True)
estimator.write_coefficients(coefficients_df, segment_settings)
estimator.write_choosers(chooser_segment)
estimator.set_chooser_id(chooser_segment.index.name)
choices = simulate.simple_simulate(
choosers=chooser_segment,
spec=segment_spec,
nest_spec=nest_spec,
locals_d=constants,
chunk_size=chunk_size,
trace_label=tracing.extend_trace_label(trace_label, segment_name),
trace_choice_name='stops',
estimator=estimator)
# convert indexes to alternative names
        choices = pd.Series(segment_spec.columns[choices.values], index=choices.index)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
concat,
date_range,
)
import pandas._testing as tm
class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
{"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"left,right,expected",
[
# booleans
(np.bool_, np.int32, np.int32),
(np.bool_, np.float32, np.object_),
# datetime-like
("m8[ns]", np.bool_, np.object_),
("m8[ns]", np.int64, np.object_),
("M8[ns]", np.bool_, np.object_),
("M8[ns]", np.int64, np.object_),
# categorical
("category", "category", "category"),
("category", "object", "object"),
],
)
def test_concat_empty_series_dtypes(self, left, right, expected):
result = concat([Series(dtype=left), Series(dtype=right)])
assert result.dtype == expected
@pytest.mark.parametrize(
"dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
)
def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
dtype = np.dtype(dtype)
result = concat([Series(dtype=dtype)])
assert result.dtype == dtype
        result = concat([Series(dtype=dtype), Series(dtype=dtype)])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 24 16:43:33 2019
@author: jeremy_lehner
"""
import pandas as pd
import datetime
from selenium import webdriver
import time
from bs4 import BeautifulSoup
from os import path
def get_scrape_date():
"""
Gets the date on which data was scraped
Parameters
----------
None
Returns
-------
date : string
Date that data was scraped in the format 'YYYY-MM-DD'
"""
# Get current date and time
now = datetime.datetime.now()
year_scraped = str(now.year)
month_scraped = str(now.month)
day_scraped = str(now.day)
# Add leading zeroes to single-digit months and days
if len(month_scraped) == 1:
month_scraped = '0' + month_scraped
if len(day_scraped) == 1:
day_scraped = '0' + day_scraped
# Construct date string
date_data = year_scraped + '-' + month_scraped + '-' + day_scraped
# Bye! <3
return date_data
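# Editorial note (sketch, not used elsewhere in this module): the manual
# zero-padding above can be done in one call with strftime, which pads
# single-digit months and days automatically.
def get_scrape_date_strftime():
    """Equivalent one-liner returning e.g. '2019-09-24'."""
    return datetime.datetime.now().strftime('%Y-%m-%d')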
def scrape_champ_names(save=True):
"""
Scrapes champion names from League of Legends Wiki and saves them to
csv file, but returns nothing
Parameters
----------
save : boolean
Save names to csv file?
Returns
-------
None
"""
# Assign scrape path variables
url = 'https://leagueoflegends.fandom.com/wiki/List_of_champions'
# Get champion names
names = pd.read_html(url)[1]
names = list(names['Champion'])
names = [s.split(',')[0] for s in names]
names = [s.split('\xa0the')[0] for s in names]
names = pd.Series(names).rename('champion')
# Write names to csv file
if save:
names.to_csv('./data/champion_names.csv', index=False)
# Bye! <3
return
def scrape_release_dates(save=True):
"""
Scrapes champion release dates from League of Legends Wiki and saves them
to csv file, but returns nothing
Parameters
----------
save : boolean
Save release dates to csv file ('YYYY-MM-DD')?
Returns
-------
None
"""
# Assign scrape path variables
url = 'https://leagueoflegends.fandom.com/wiki/List_of_champions'
# Get release dates
dates = pd.read_html(url)[1]
dates = dates['Release Date'].rename('release_date')
# Write release dates to csv file
if save:
dates.to_csv('./data/champion_release_dates.csv', index=False)
# Bye! <3
return
def scrape_number_of_skins(names, save=True):
"""
Scrapes number of champion skins from League of Legends Wiki and saves
them to a csv file, but returns nothing
Parameters
----------
names : pandas series
Contains the champion names as strings in alphabetical order
save : boolean
Save number of champion skins to csv file?
Returns
-------
None
"""
# Assign scrape path variables
style = 'display:inline-block; margin:5px; width:342px'
# Set up selenium web driver
driver = webdriver.Chrome('./src/utils/chromedriver')
# Get number of skins
num_skins = []
for name in names:
name = name.replace(' ', '_')
skins_url = f'https://leagueoflegends.fandom.com/wiki/{name}/Skins'
driver.get(skins_url)
time.sleep(2)
soup = BeautifulSoup(driver.page_source, 'html.parser')
num_skins.append(len(soup.find_all('div', {'style': style})))
num_skins = pd.Series(num_skins)
# Close selenium web driver
driver.close()
if save:
num_skins.to_csv('./data/num_skins.csv', index=False)
# Bye! <3
return
def scrape_win_rates(save=True):
"""
Scrapes the current day North America champion win rates from op.gg and
saves them to a csv file along with the date, but returns nothing
Parameters
----------
save : boolean
Save win rates as csv file?
Returns
-------
None
"""
# Get date at time of scraping
date = get_scrape_date()
# Assign scraping variables
champstats_url = 'https://na.op.gg/statistics/champion/'
today_xpath = '//*[@id="recent_today"]/span/span'
winrate_xpath = '//*[@id="rate_win"]/span/span'
scroll_down = "window.scrollTo(0, document.body.scrollHeight);"
champs = 'Champion.1'
win = 'Win rate'
# Set up selenium web driver
driver = webdriver.Chrome('./src/utils/chromedriver')
driver.get(champstats_url)
# Select stats for current day
today_button = driver.find_element_by_xpath(today_xpath)
today_button.click()
# Select win rates
winrate_button = driver.find_element_by_xpath(winrate_xpath)
winrate_button.click()
# Scroll to bottom of page and wait to bypass ads
driver.execute_script(scroll_down)
time.sleep(10)
# Scrape win rates
winrates = pd.read_html(driver.page_source)[1]
winrates = winrates[[champs, win]]
# Sort win rates by champion in alphabetical order
winrates.sort_values(by=champs, inplace=True)
winrates = winrates[win].reset_index()[win]
# Close selenium web driver
driver.close()
# Convert win rates to float
winrates = winrates.str.replace('%', '')
winrates = round(winrates.astype('float')/100, 4)
# Add a column with the date
winrates = pd.DataFrame({'winrate': winrates, 'date': date})
# Write win rates to csv file
if save:
date = date.replace('-', '')
winrates.to_csv(f'./data/win/win_rates_{date}.csv', index=False)
else:
print('Win rates were scraped, but not saved!')
# Bye! <3
return
def scrape_ban_rates(save=True):
"""
Scrapes the current day North America champion ban rates from op.gg and
saves them to a csv file along with the date, but returns nothing
Parameters
----------
save : boolean
Save ban rates as csv file?
Returns
-------
None
"""
# Get date at time of scraping
date = get_scrape_date()
# Assign scraping variables
champstats_url = 'https://na.op.gg/statistics/champion/'
today_xpath = '//*[@id="recent_today"]/span/span'
banrate_xpath = '//*[@id="rate_ban"]/span/span'
scroll_down = "window.scrollTo(0, document.body.scrollHeight);"
champs = 'Champion.1'
ban = 'Ban ratio per game'
# Set up selenium web driver
driver = webdriver.Chrome('./src/utils/chromedriver')
driver.get(champstats_url)
# Select stats for current day
today_button = driver.find_element_by_xpath(today_xpath)
today_button.click()
# Select ban rates
banrate_button = driver.find_element_by_xpath(banrate_xpath)
banrate_button.click()
# Scroll to bottom of page and wait to bypass ads
driver.execute_script(scroll_down)
time.sleep(10)
# Scrape ban rates
banrates = pd.read_html(driver.page_source)[1]
banrates = banrates[[champs, ban]]
# Sort ban rates by champion in alphabetical order
banrates.sort_values(by=champs, inplace=True)
banrates = banrates[ban].reset_index()[ban]
    # Close Selenium web driver
driver.close()
# Convert ban rates to float
banrates = banrates.str.replace('%', '')
banrates = round(banrates.astype('float')/100, 4)
# Add a column with the date
    banrates = pd.DataFrame({'banrate': banrates, 'date': date})
#!/usr/bin/env python
import bz2
import gzip
import logging
import os
import subprocess
from collections import OrderedDict
from pathlib import Path
from pprint import pformat
import pandas as pd
import yaml
from Bio import SeqIO
def fetch_executable(cmd, ignore_errors=False):
executables = [
cp for cp in [
str(Path(p).joinpath(cmd))
for p in os.environ['PATH'].split(os.pathsep)
] if os.access(cp, os.X_OK)
]
if executables:
return executables[0]
elif ignore_errors:
return None
else:
raise RuntimeError(f'command not found: {cmd}')
def read_fasta_and_generate_seq(path):
print_log(f'Read a FASTA file:\t{path}')
if path.endswith('.gz'):
f = gzip.open(path, 'rt')
elif path.endswith('.bz2'):
f = bz2.open(path, 'rt')
else:
f = open(path, 'r')
for s in SeqIO.parse(f, 'fasta'):
yield s.id, s.seq
f.close()
def print_yml(data):
logger = logging.getLogger(__name__)
logger.debug(data)
print(yaml.dump(data))
def read_bed(path, merge=True, **kwargs):
print_log(
'Read a BED file{0}:\t{1}'.format(
(' (merging intervals)' if merge else ''), path
)
)
dtype = {
'chrom': str, 'chromStart': int, 'chromEnd': int, 'name': str,
'score': int, 'strand': str, 'thickStart': int, 'thickEnd': int,
'itemRgb': str, 'blockCount': int, 'blockSizes': int,
'blockStarts': int, 'ADDITIONAL': str
}
bed_lines = [d for d in _stream_bed_lines(path=path, **kwargs)]
return (
pd.DataFrame(bed_lines) if bed_lines else pd.DataFrame()
).pipe(
lambda d:
d.astype(dtype={k: v for k, v in dtype.items() if k in d.columns})
)
def read_vcf(path, sample_name=None, min_af=None, max_af=None,
include_filtered=False, **kwargs):
print_log(
'Read a VCF file ({0} filtered variants):\t{1}'.format(
('including' if include_filtered else 'excluding'), path
)
)
dtype = {
'CHROM': str, 'POS': int, 'ID': str, 'REF': str, 'ALT': str,
'QUAL': str, 'FILTER': str, 'INFO': str, 'FORMAT': str
}
vcf_lines = [d for d in _stream_vcf_lines(path=path, **kwargs)]
if not vcf_lines:
        return pd.DataFrame()
import pandas as pd
from pomegranate import HiddenMarkovModel, DiscreteDistribution
import numpy as np
from pomegranate import *
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeClassifier
TIME_INTERVAL = "60s"
N_STATES = 16
# n_clusters = 10 # one in [10, 20, 50, 100]
## The main claim in the paper is that there are problems that are not logged. In order to find them we need to consider alternative approaches, e.g. measuring frequencies, counts or co-occurrences.
## Therefore, it is important to
## Structure of the paper:
# 1)
# for TIME_INTERVAL in ["120s", "180s", "240s", "300s"]:
# for n_clusters in [10, 20, 50, 100]:
from collections import defaultdict
dd = defaultdict()
# for TIME_INTERVAL in ["120s"]:
# for n_clusters in [10]:
flag = True
for TIME_INTERVAL in ["180s", "240s", "300s"]:
# for TIME_INTERVAL in ["120s"]:
for n_clusters in [30]:
# for n_clusters in [30]:
if flag == True:
try:
po = pd.read_csv(
"/home/matilda/PycharmProjects/FailurePrediction/6_insights/0_different_clustering_results_anomaly_detection/different_clustering_results_AD_"+ str(N_STATES) + "_states.csv")
po.columns = ["exp_name", "scores"]
po.index = po.exp_name
po = po.drop(["exp_name"], axis=1)
dd = po.to_dict()["scores"]
except:
dd = defaultdict()
print(dd)
flag = False
for run_id in range(0, 2):
print("------"*20)
print("Processsing time interval: {}, with n_clusters {} and round {}".format(TIME_INTERVAL, n_clusters, run_id))
            pom_data = pd.read_csv("/home/matilda/PycharmProjects/RCA_logs/2_copy_original_data/Fault-Injection-Dataset-master/nova.tsv", sep="\t")
'''
Replica of Jupyter notebook - useful for debugging SA code.
'''
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import cProfile
import pstats
from sa import SACluster, ExponentialCoolingSchedule, CoruCoolingSchedule
if __name__ == '__main__':
df = pd.read_csv('data/Mall_Customers.csv')
unlabelled_x = df.iloc[:, [3, 4]].values
n_clusters = 5
max_iter = max(150 * unlabelled_x.shape[0], 10000)
cooling_schedule = CoruCoolingSchedule(1000, max_iter=max_iter)
#cooling_schedule = ExponentialCoolingSchedule(1000)
sa = SACluster(n_clusters=n_clusters, cooling_schedule=cooling_schedule,
dist_metric='euclidean', max_iter=max_iter, random_state=101)
#.run('sa.fit(unlabelled_x)', filename = 'pr.txt')
#p = pstats.Stats('pr.txt')
#p.sort_stats('cumulative').print_stats(70)
state, energy = sa.fit(unlabelled_x)
    print(pd.DataFrame(sa.search_history))
from typing import Union
import pandas as pd
from sklearn.model_selection import train_test_split
from ..datastore import DataItem
def get_sample(
src: Union[DataItem, pd.core.frame.DataFrame], sample: int, label: str, reader=None
):
"""generate data sample to be split (candidate for mlrun)
Returns features matrix and header (x), and labels (y)
:param src: data artifact
:param sample: sample size from data source, use negative
integers to sample randomly, positive to
sample consecutively from the first row
:param label: label column title
"""
if type(src) == pd.core.frame.DataFrame:
table = src
else:
table = src.as_df()
# get sample
if (sample == -1) or (sample >= 1):
# get all rows, or contiguous sample starting at row 1.
raw = table.dropna()
labels = _get_label_from_raw(raw, label)
raw = raw.iloc[:sample, :]
labels = labels.iloc[:sample]
else:
# grab a random sample
raw = table.dropna().sample(sample * -1)
labels = _get_label_from_raw(raw, label)
return raw, labels, raw.columns.values
def _get_label_from_raw(raw, label):
"""
    Thin wrapper so that a clear error is raised when the user passes a label column that does not exist
"""
if label not in raw:
raise ValueError(f"Specified label could not be found: {label}")
return raw.pop(label)
def get_splits(
raw,
labels,
n_ways: int = 3,
test_size: float = 0.15,
valid_size: float = 0.30,
label_names: list = ["labels"],
random_state: int = 1,
):
"""generate train and test sets (candidate for mlrun)
cross validation:
1. cut out a test set
2a. use the training set in a cross validation scheme, or
2b. make another split to generate a validation set
2 parts (n_ways=2): train and test set only
3 parts (n_ways=3): train, validation and test set
:param raw: dataframe or numpy array of raw features
:param labels: dataframe or numpy array of raw labels
:param n_ways: (3) split data into 2 or 3 parts
:param test_size: proportion of raw data to set asid as test data
:param valid_size: proportion of remaining data to be set as validation
:param label_names: label names
:param random_state: (1) random number seed
"""
x, xte, y, yte = train_test_split(
raw, labels, test_size=test_size, random_state=random_state
)
if n_ways == 2:
return (x, y), (xte, yte)
elif n_ways == 3:
xtr, xva, ytr, yva = train_test_split(
x, y, train_size=1 - valid_size, random_state=random_state
)
return (xtr, ytr), (xva, yva), (xte, yte)
else:
raise Exception("n_ways must be in the range [2,3]")
def save_test_set(
context,
data: dict,
header: list,
label: str = "labels",
file_ext: str = "parquet",
index: bool = False,
debug: bool = False,
):
"""log a held out test set
:param context: the function execution context
    :param data:       dict with keys 'xtest', 'ytest', and optionally
'xcal', 'ycal' if n_ways=4 in `get_splits`
:param ytest: test labels, as np.ndarray output from `get_splits`
:param header: ([])features header if required
:param label: ("labels") name of label column
:param file_ext: format of test set file
:param index: preserve index column
:param debug: (False)
"""
if all(x in data.keys() for x in ["xtest", "ytest"]):
test_set = pd.concat(
[
pd.DataFrame(data=data["xtest"], columns=header),
pd.DataFrame(data=data["ytest"].values, columns=[label]),
],
axis=1,
)
context.log_dataset("test_set", df=test_set, format=file_ext, index=index)
if all(x in data.keys() for x in ["xcal", "ycal"]):
cal_set = pd.concat(
[
pd.DataFrame(data=data["xcal"], columns=header),
| pd.DataFrame(data=data["ycal"].values, columns=[label]) | pandas.DataFrame |
import warnings
from collections import OrderedDict
from datetime import time
import tables as tb
import pandas as pd
import pandas.lib as lib
import numpy as np
import pandas.io.pytables as pdtables
from trtools.compat import izip, pickle
from trtools.io.common import _filename
from trtools.io.table_indexing import create_slices
MIN_ITEMSIZE = 10
class MismatchColumnsError(Exception):
pass
def convert_frame(df):
"""
Input: DataFrame
Output: pytable table description and pytable compatible recarray
"""
sdict = OrderedDict()
atoms = OrderedDict()
types = OrderedDict()
#index
index_name = df.index.name or 'pd_index'
converted, inferred_type, atom = _convert_obj(df.index)
atoms[index_name] = atom
sdict[index_name] = converted
types[index_name] = inferred_type
# columns
for col in df.columns:
converted, inferred_type, atom = _convert_obj(df[col])
atoms[col] = atom
sdict[col] = converted
types[col] = inferred_type
# create table desc
desc = OrderedDict()
for pos, data in enumerate(atoms.items()):
k, atom = data
col = tb.Col.from_atom(atom, pos=pos)
desc[str(k)] = col
# create recarray
dtypes = [(str(k), v.dtype) for k, v in list(sdict.items())]
recs = np.recarray(shape=len(df), dtype=dtypes)
for k, v in list(sdict.items()):
recs[str(k)] = v
return desc, recs, types
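# Editorial sketch (hypothetical frame, not part of trtools): convert_frame
# returns a pytables column description, a recarray of values, and the inferred
# per-column types, which frame_to_table() below feeds to pytables.
def _example_convert_frame():
    df = pd.DataFrame({'price': [1.5, 2.5], 'sym': ['a', 'b']},
                      index=pd.date_range('2000-01-01', periods=2))
    desc, recs, types = convert_frame(df)
    # desc maps column name -> tb.Col, recs is a numpy recarray, and types is
    # e.g. {'pd_index': 'datetime64', 'price': 'floating', 'sym': 'string'}
    return desc, recs, types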
def _convert_obj(obj):
"""
Convert a series to pytables values and Atom
"""
if isinstance(obj, pd.DatetimeIndex):
converted = obj.asi8
return converted, 'datetime64', tb.Int64Atom()
elif isinstance(obj, pd.PeriodIndex):
converted = obj.values
return converted, 'periodindex', tb.Int64Atom()
elif isinstance(obj, pd.PeriodIndex):
converted = obj.values
return converted, 'int64', tb.Int64Atom()
inferred_type = lib.infer_dtype(obj)
values = np.asarray(obj)
if inferred_type == 'datetime64':
converted = values.view('i8')
return converted, inferred_type, tb.Int64Atom()
if inferred_type == 'string':
# TODO, am I doing this right?
converted = np.array(list(values), dtype=np.bytes_)
itemsize = converted.dtype.itemsize
# for OBT, can't assume value will be right for future
# frame keys
if itemsize < MIN_ITEMSIZE:
itemsize = MIN_ITEMSIZE
converted = converted.astype("S{0}".format(itemsize))
return converted, inferred_type, tb.StringAtom(itemsize)
elif inferred_type == 'unicode':
# table's don't seem to support objects
raise Exception("Unsupported inferred_type {0}".format(inferred_type))
converted = np.asarray(values, dtype='O')
return converted, inferred_type, tb.ObjectAtom()
elif inferred_type == 'datetime':
converted = np.array([(time.mktime(v.timetuple()) +
v.microsecond / 1E6) for v in values],
dtype=np.float64)
return converted, inferred_type, tb.Time64Atom()
elif inferred_type == 'integer':
converted = np.asarray(values, dtype=np.int64)
return converted, inferred_type, tb.Int64Atom()
elif inferred_type == 'floating':
converted = np.asarray(values, dtype=np.float64)
return converted, inferred_type, tb.Float64Atom()
raise Exception("Unsupported inferred_type {0} {1}".format(inferred_type, str(values[-5:])))
def _handle(obj):
if isinstance(obj, tb.file.File):
handle = obj
else:
handle = obj._v_file
return _wrap(handle)
def _meta(obj, meta=None):
obj = _unwrap(obj)
if isinstance(obj, tb.file.File):
obj = obj.root
return _meta_file(obj, meta)
handle = _handle(obj)
type = handle.type
if type == 'directory':
return _meta_dir(obj, meta)
return _meta_file(obj, meta)
def _meta_file(obj, meta):
if meta:
obj._v_attrs.pd_meta = meta
return
try:
meta = obj._v_attrs.pd_meta
if isinstance(meta, str):
meta = pickle.loads(meta)
return meta
except:
return {}
def _meta_path(obj):
import os.path
dir = os.path.dirname(obj._v_file.filename)
filename = obj._v_pathname[1:]
bits = filename.split('/')
bits.append('meta')
filename = ".".join(bits)
filepath = os.path.join(dir, filename)
return filepath
def _meta_dir(obj, meta=None):
filepath = _meta_path(obj)
if meta:
with open(filepath, 'wb') as f:
pickle.dump(meta, f)
return
try:
with open(filepath, 'rb') as f:
meta = pickle.load(f)
return meta
except:
return {}
def _name(table):
try:
name = table.attrs.pandas_name
except:
name = table._v_name
return name
def _columns(table):
try:
columns = list(_meta(table)['columns'])
except:
# assume first is index
columns = table.colnames[1:]
return columns
def _index_name(obj):
if isinstance(obj, pd.DataFrame):
return _index_name_frame(obj)
return _index_name_table(obj)
def _index_name_table(table):
try:
index_name = _meta(table)['index_name']
except:
# assume first is index
index_name = table.colnames[0]
return index_name
def _index_name_frame(df):
#TODO support multiindex
index = df.index
def unconvert_obj(values, type):
if type == 'datetime64':
return values.astype("M8[ns]")
if type == 'string':
return values.astype(np.unicode_)
return values
def unconvert_index(index_values, type):
return pdtables._unconvert_index(index_values, type)
def create_table(group, name, desc, types, filters=None, expectedrows=None, title=None, columns=None, index_name=None, extra_meta=None):
if title is None:
title = name
with warnings.catch_warnings(): # ignore the name warnings
table = group._v_file.createTable(group, name, desc, title,
expectedrows=expectedrows, filters=filters)
meta = {}
meta['columns'] = columns or list(desc.keys())
meta['value_types'] = types
meta['index_name'] = index_name
meta['name'] = name
if extra_meta:
meta.update(extra_meta)
_meta(table, meta)
return table
def frame_to_table(name, df, group, filters=None, expectedrows=None, create_only=False, *args, **kwargs):
"""
create_only will create the table but not appending the DF.
Since the machinery for figuring out a table definition and converting values for
appending are the same.
"""
# TODO: potentially could change this to subset the DF so we don't convert and iterate over all
# the values
hfile = group._v_file
# kind of a kludge to get series to work
if isinstance(df, pd.Series):
series_name = 'vals'
df = pd.DataFrame({series_name:df}, index=df.index)
desc, recs, types = convert_frame(df)
columns = list(df.columns)
index_name = df.index.name or 'pd_index'
table = create_table(group, name, desc, types, filters=filters, columns=columns,
expectedrows=expectedrows, index_name=index_name,*args, **kwargs)
if not create_only:
table.append(recs)
hfile.flush()
def table_to_frame(table, where=None):
"""
    Simple conversion of a table to a DataFrame
"""
if where:
try:
data = table_where(table, where)
except Exception as err:
raise Exception("readWhere error: {0} {1}".format(where, str(err)))
else:
data = table.read()
df = table_data_to_frame(data, table)
return df
def copy_table_def(group, name, orig):
table_meta = _meta(orig)
desc = orig.description
types = table_meta['value_types']
index_name = table_meta['index_name']
columns = table_meta['columns']
expectedrows = orig.nrows
table = group.create_table(name, desc, types, columns=columns, index_name=index_name, expectedrows=expectedrows)
return table
def table_where(table, where):
"""
Optimized Where
"""
return table.readWhere(where)
def get_table_index(table, index_name=None, types=None):
"""
Get the pandas index from a pytable
"""
if index_name is None:
index_name = _index_name(table)
    if index_name is None:  # neither passed in nor set in meta
return None
if types is None:
meta = _meta(table)
types = meta.setdefault('value_types', {})
index_values = table.col(index_name)
index = unconvert_index(index_values, types[index_name])
return index
def _data_names(data):
if hasattr(data, 'keys'):
return list(data.keys())
if hasattr(data, 'dtype'):
return data.dtype.names
def table_data_to_frame(data, table, columns=None):
"""
Given the pytables.recarray data and the metadata taken from table,
create a DataFrame
"""
columns = columns or _columns(table)
index_name = _index_name(table)
name = _name(table)
meta = _meta(table)
types = meta.setdefault('value_types', {})
index = None
if index_name:
if index_name not in _data_names(data): # handle case where we dont send index with data
index_values = table.col(index_name)
else:
index_values = data[index_name]
index = unconvert_index(index_values, types[index_name])
try:
columns.remove(index_name)
except ValueError:
pass
sdict = {}
for col in columns:
# recarrays have only str columns
temp = data[str(col)]
temp = unconvert_obj(temp, types[col])
sdict[col] = temp
df = pd.DataFrame(sdict, columns=columns, index=index)
df.name = name
return df
def _convert_param(param, base_type=None):
"""
A well not thought out function to convert params to the proper base type.
"""
if base_type == 'datetime64' and isinstance(param, str):
return pd.Timestamp(param).value
if isinstance(param, str): # quote the string params
param = "{0}".format(param.encode('UTF8'))
if isinstance(param, pd.Timestamp): # Timestamp itself is never valid type
param = param.value
return param
class HDFSql(object):
"""
    HDFSql object. Kept in a separate object so we don't pollute __getattr__ on the table
"""
def __init__(self, table, mapping=None):
# TODO I could coalesce all types/mapping/table into one dict
# so HDFSql doesn't need to know about the table
self.table = table
self.mapping = mapping or {}
# assuming meta won't change during object lifetime...
self.meta = _meta(table)
self.types = self.meta['value_types']
def __getattr__(self, key):
key = self.get_valid_key(key)
try:
type = self.types[key]
except:
type = None
return HDFQuery(key, type)
raise AttributeError("No column")
def get_valid_key(self, key):
if key in self.table.colnames:
return key
# shortcuts
if key == 'index':
return _index_name(self.table)
if key in self.mapping:
return self.mapping[key]
raise AttributeError("No column")
class HDFQuery(object):
def __init__(self, base, base_type=None):
self.base = base
self.base_type = base_type
def base_op(self, other, op):
""" quick convert to pytable expression """
base = "{0} {1} {2}".format(self.base, op, _convert_param(other, self.base_type))
return HDFQuery(base, 'statement')
__eq__ = lambda self, other: self.base_op(other, "==")
__gt__ = lambda self, other: self.base_op(other, ">")
__ge__ = lambda self, other: self.base_op(other, ">=")
__lt__ = lambda self, other: self.base_op(other, "<")
__le__ = lambda self, other: self.base_op(other, "<=")
def __and__(self, other):
base = "({0}) & ({1})".format(self.base, other)
return HDFQuery(base)
def __or__(self, other):
base = "({0}) | ({1})".format(self.base, other)
return HDFQuery(base)
def __repr__(self):
return str(self.base)
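# Editorial sketch: HDFQuery builds pytables "where" strings through operator
# overloading; the composed expression is what HDF5Table.query() passes to
# table.readWhere().  Standalone illustration (hypothetical columns, no HDF5
# file needed):
def _example_hdf_query():
    q = (HDFQuery('price', 'floating') >= 100) & (HDFQuery('volume', 'integer') > 0)
    return str(q)   # "(price >= 100) & (volume > 0)"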
def hdf5_obj_repr(self, obj):
cls = self.__class__.__name__
return "{0}\n\n{1}".format(cls, repr(obj))
class HDF5Wrapper(object):
def __repr__(self):
return hdf5_obj_repr(self, self.obj)
def keys(self):
return list(self.obj._v_children.keys())
def meta(self, key=None, value=None):
meta = _meta(self)
if key and value:
meta[key] = value
# store meta
_meta(self, meta)
return meta
if key:
# single val
return meta[key]
return meta
def _unwrap(obj):
if isinstance(obj, HDF5Wrapper):
return obj.obj
return obj
def _wrap(obj, parent=None):
"""
    Wrap the pytables object with an appropriate wrapper class.
    Note: since only obj and parent are passed in here, all other params need to be
    stored in _meta. This keeps creation and reading the same process.
"""
if isinstance(obj, tb.group.RootGroup):
return HDF5Group(obj, parent)
if isinstance(obj, tb.Group):
return HDF5Group(obj, parent)
if isinstance(obj, tb.Table):
return HDF5Table(obj)
if isinstance(obj, tb.file.File):
return HDF5Handle(obj)
return obj
class HDF5Handle(HDF5Wrapper):
"""
This wraps around the handle object
"""
def __init__(self, filepath, mode='a', type=None):
if isinstance(filepath, tb.file.File):
return self._init_from_handle(filepath)
self.filepath = filepath
self.mode = mode
self.obj = None
self.obj = self.open(self.mode)
meta = _meta(self.obj)
if 'type' in meta:
assert type is None or meta['type'] == type # these should never mismatch
type = meta['type']
if type is None:
type = 'file' # default
if self.mode != 'r':
meta['type'] = type
_meta(self.obj, meta)
def _init_from_handle(self, handle):
self.filepath = handle.filename
self.mode = handle.mode
self.obj = handle
meta = _meta(handle)
type = meta.setdefault('type', 'file')
if self.mode != 'r':
_meta(handle, meta)
self.type = type
@property
def handle(self):
return self.obj
def keys(self):
return list(self.root._v_children.keys())
def reopen(self):
self.obj = self.open(self.mode)
def open(self, mode="a", warn=True):
handle = tb.openFile(self.filepath, mode)
return handle
def close(self):
if self.obj is not None and self.obj.isopen:
self.obj.close()
def create_group(self, group_name, filters=None, meta=None, root=None):
"""
Create HDFPanelGroup
"""
handle = self.handle
if root is None:
root = handle.root
group = handle.createGroup(root, group_name, group_name, filters=filters)
if meta is None:
meta = {}
meta['group_type'] = 'default'
meta['filters'] = filters
_meta(group, meta)
return _wrap(group, self)
def __getattr__(self, key):
if hasattr(self.obj, key):
val = getattr(self.obj, key)
return _wrap(val, self)
if hasattr(self.obj.root, key):
val = getattr(self.obj.root, key)
return _wrap(val, self)
raise AttributeError()
def __getitem__(self, key):
if hasattr(self.obj.root, key):
val = getattr(self.obj.root, key)
return _wrap(val, self)
raise KeyError()
class HDF5Group(HDF5Wrapper):
def __init__(self, group, handle):
self.obj = group
self.handle = handle
self.filters = None
@property
def group(self):
return self.obj
def create_table(self, name, desc, types, filters=None, expectedrows=None, title=None, columns=None, index_name=None):
table = create_table(self.group, name, desc, types,
filters, expectedrows, title, columns, index_name)
return _wrap(table)
def frame_to_table(self, name, df, *args, **kwargs):
group = self.group
frame_to_table(name, df, group, *args, **kwargs)
def create_group(self, *args, **kwargs):
return self.handle.create_group(*args, root=self.obj, **kwargs)
def __getitem__(self, key):
if hasattr(self.obj, key):
val = getattr(self.obj, key)
return _wrap(val)
raise KeyError()
def __getattr__(self, key):
if hasattr(self.obj, key):
val = getattr(self.obj, key)
return _wrap(val)
raise AttributeError()
class HDF5Table(HDF5Wrapper):
def __init__(self, table, mapping=None, cache_index=True):
self.obj = table
self.mapping = mapping or {}
self.cache_index = cache_index
self._index = None
self._ix = None
_columns = None
@property
def columns(self):
if self._columns is None:
self._columns = _meta(self.table)['columns']
return self._columns
@property
def table(self):
return self.obj
@property
def index(self):
if self._index is None and self.cache_index:
self._index = CachingIndex(self)
return self._index
def append(self, data, flush=False):
if isinstance(data, pd.DataFrame):
self._append_frame(data, flush)
def _append_frame(self, df, flush=False):
desc, recs, types = convert_frame(df)
if not np.all(df.columns == self.columns):
raise MismatchColumnsError("HDFTable and DataFrame columns are not the same {0} vs {1}".format(
df.columns, self.columns))
self.table.append(recs)
if flush:
self.table.flush()
def keys(self):
return self.table.colnames
@property
def sql(self):
return HDFSql(self.table, self.mapping)
def __getitem__(self, key):
# TODO This can be faster if we cache the getWhereList somewhere on disk
key = _convert_param(key)
if isinstance(key, HDFQuery):
return self.query(key)
if isinstance(key, slice):
data = self.table[key]
df = table_data_to_frame(data, self.table)
return df
if isinstance(key, np.ndarray):
if key.dtype == 'bool':
return self._getitem_bools(key)
if key.dtype == 'int':
return self._getitem_ints(key)
try:
# list of slices
if isinstance(key[0], slice):
return self._getitem_slices(key)
except:
pass
def _getitem_slices(self, key):
parts = []
for slice in key:
part = self.table[slice]
parts.append(part)
data = np.concatenate(parts)
df = table_data_to_frame(data, self.table)
return df
def _getitem_ints(self, key):
slices = create_slices(key)
return self._getitem_slices(slices)
def _getitem_bools(self, key):
slices = create_slices(key)
return self._getitem_slices(slices)
def query(self, query):
where = str(query)
df = table_to_frame(self.table, where=where)
return df
def __getattr__(self, key):
if hasattr(self.obj, key):
val = getattr(self.obj, key)
return _wrap(val)
raise AttributeError()
@property
def ix(self):
# start splitting out
if self._ix is None:
self._ix = SimpleIndexer(self)
return self._ix
def add_index(self, col):
column = self.col(col)
if not column.is_indexed:
print(("Creating Index on {0}".format(col)))
num = column.createCSIndex()
print(("Index created with {0} vals".format(num)))
else:
print(("Index already exists {0}. Reindex?".format(col)))
def reindex(self, col):
column = self.col(col)
if column.is_indexed:
print(("Re-indexing on {0}".format(col)))
column.reIndex()
else:
print(("{0} is not indexed".format(col)))
def reindex_all(self):
cols = self.table.colnames
for col in cols:
self.reindex(col)
class CachingIndex(object):
def __init__(self, obj):
self.obj = obj
self._index = get_table_index(obj.table)
def __getattr__(self, key):
if hasattr(self._index, key):
return getattr(self._index, key)
raise AttributeError()
__eq__ = lambda self, other: self._comparison('__eq__', other)
__ne__ = lambda self, other: self._comparison('__ne__', other)
__gt__ = lambda self, other: self._comparison('__gt__', other)
__ge__ = lambda self, other: self._comparison('__ge__', other)
__lt__ = lambda self, other: self._comparison('__lt__', other)
__le__ = lambda self, other: self._comparison('__le__', other)
def _comparison(self, op, other):
# TODO add gt, ge, lt, le comparisons that output IndexSlice.
index_op = getattr(self._index, op)
if isinstance(self._index, pd.DatetimeIndex):
return self._datetime_comparison(index_op, other)
result = index_op(other)
return result
def _datetime_comparison(self, op, other):
        other = pd.Timestamp(other)
"""Test functions in owid.datautils.dataframes module.
"""
import numpy as np
import pandas as pd
from pytest import warns
from typing import Any, Dict
from owid.datautils import dataframes
class TestCompareDataFrames:
def test_with_large_absolute_tolerance_all_equal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [True, True]}))
def test_with_large_absolute_tolerance_all_unequal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=0.9,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [False, False]}))
def test_with_large_absolute_tolerance_mixed(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3.1]}),
absolute_tolerance=1,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [True, False]}))
def test_with_large_relative_tolerance_all_equal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1e-8,
relative_tolerance=0.5,
).equals(pd.DataFrame({"col_01": [True, True]}))
def test_with_large_relative_tolerance_all_unequal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1e-8,
relative_tolerance=0.3,
).equals(pd.DataFrame({"col_01": [False, False]}))
def test_with_large_relative_tolerance_mixed(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1e-8,
relative_tolerance=0.4,
).equals(pd.DataFrame({"col_01": [False, True]}))
def test_with_dataframes_of_equal_values_but_different_indexes(self):
# Even if dataframes are not identical, compare_dataframes should return all Trues (since it does not care about
# indexes, only values).
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "b"]}).set_index(
"col_02"
),
df2=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "c"]}).set_index(
"col_02"
),
).equals(pd.DataFrame({"col_01": [True, True]}))
def test_with_two_dataframes_with_object_columns_with_nans(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [np.nan, "b", "c"]}),
df2=pd.DataFrame({"col_01": [np.nan, "b", "c"]}),
).equals(pd.DataFrame({"col_01": [True, True, True]}))
class TestAreDataFramesEqual:
def test_on_equal_dataframes_with_one_integer_column(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, 3]}),
df2=pd.DataFrame({"col_01": [1, 2, 3]}),
)[0]
def test_on_almost_equal_dataframes_but_differing_by_one_element(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, 3]}),
df2=pd.DataFrame({"col_01": [1, 2, 0]}),
)[0]
def test_on_almost_equal_dataframes_but_differing_by_type(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, 3]}),
df2=pd.DataFrame({"col_01": [1, 2, 3.0]}),
)[0]
def test_on_equal_dataframes_containing_nans(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, np.nan]}),
df2=pd.DataFrame({"col_01": [1, 2, np.nan]}),
)[0]
def test_on_equal_dataframes_containing_only_nans(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [np.nan, np.nan]}),
df2=pd.DataFrame({"col_01": [np.nan, np.nan]}),
)[0]
def test_on_equal_dataframes_both_empty(self):
assert dataframes.are_equal(df1=pd.DataFrame(), df2=pd.DataFrame())[0]
def test_on_equal_dataframes_with_various_types_of_columns(self):
assert dataframes.are_equal(
df1=pd.DataFrame(
{
"col_01": [1, 2],
"col_02": [0.1, 0.2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
df2=pd.DataFrame(
{
"col_01": [1, 2],
"col_02": [0.1, 0.2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
)[0]
def test_on_almost_equal_dataframes_but_columns_sorted_differently(self):
assert not dataframes.are_equal(
df1=pd.DataFrame(
{
"col_01": [1, 2],
"col_02": [0.1, 0.2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
df2=pd.DataFrame(
{
"col_02": [0.1, 0.2],
"col_01": [1, 2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
)[0]
def test_on_unequal_dataframes_with_all_columns_different(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": [0.1, 0.2]}),
df2=pd.DataFrame({"col_03": [0.1, 0.2], "col_04": [1, 2]}),
)[0]
def test_on_unequal_dataframes_with_some_common_columns(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": [0.1, 0.2]}),
df2=pd.DataFrame({"col_01": [1, 2], "col_03": [1, 2]}),
)[0]
def test_on_equal_dataframes_given_large_absolute_tolerance(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [10, 20]}),
df2=pd.DataFrame({"col_01": [11, 21]}),
absolute_tolerance=1,
relative_tolerance=1e-8,
)[0]
def test_on_unequal_dataframes_given_large_absolute_tolerance(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [10, 20]}),
df2=pd.DataFrame({"col_01": [11, 21]}),
absolute_tolerance=0.9,
relative_tolerance=1e-8,
)[0]
def test_on_equal_dataframes_given_large_relative_tolerance(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1]}),
            df2=pd.DataFrame({"col_01": [2]}),
import os
from datetime import datetime
import time
from sklearn.preprocessing import StandardScaler
import plotly.express as px
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import seaborn as sns
import math
import statsmodels.api as sm
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.seasonal import seasonal_decompose
from scipy import stats
from itertools import product
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import os
"""
Read the info and train data
"""
info = pd.read_csv("asset_details.csv")
ctrain = pd.read_csv("train.csv")
#print(info.head(10))
# Impute missing time value
def c_time_sub(asset_id,data=ctrain):
df=data[ctrain["Asset_ID"]==asset_id].set_index("timestamp")
df=df.reindex(range(df.index[0],df.index[-1]+60,60), method="pad")
return df
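# Editorial sketch (hypothetical two-row frame): reindexing onto a full
# 60-second grid with method="pad" forward-fills any missing minutes, which is
# what c_time_sub relies on to produce a gap-free series.
def _example_pad_reindex():
    df = pd.DataFrame({"Close": [1.0, 3.0]}, index=[0, 120])   # minutes 0 and 2 only
    return df.reindex(range(0, 180, 60), method="pad")         # minute 60 filled with 1.0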
# subgroup BTC(bitcoin)
btc = c_time_sub(asset_id=1)
# subgroup ETH(Ethereum)
eth = c_time_sub(asset_id=6)
# subgroup cardano(bitcoin)
ada = c_time_sub(asset_id=3)
#print("btc",btc.head(10),"eth",eth.head(10),"ADA",ada.head(10))
# time frame selection: from datetime to timestamp
totimestamp= lambda s: np.int32(time.mktime(datetime.strptime(s,"%d/%m/%Y").timetuple()))
# Log Return
def log_return(series, periods=1):
return np.log(series).diff(periods=periods)
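# Editorial sketch: a one-period log return is ln(P_t / P_{t-1}); for the
# hypothetical closes 100 -> 110 below the return is ln(1.1) ~= 0.0953, and the
# first element is NaN because there is no prior price to difference against.
def _example_log_return():
    return log_return(pd.Series([100.0, 110.0]))   # [NaN, 0.09531...]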
# Data Selection
def crypto_sub(asset_id ,data= ctrain ):
df = data[data["Asset_ID"]==asset_id].reset_index(drop = True)
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
df = df.set_index('timestamp')
return df
#Subgroup Bitcoin
btc1=crypto_sub(asset_id=1)
#Subgroup Eth
eth1=crypto_sub(asset_id=6)
#Subgroup Ada
ada1=crypto_sub(asset_id=3)
"""
Data visualization
"""
info_s=info.sort_values("Weight")
fig_index=px.bar(info_s,x="Asset_Name" , y="Weight", color="Weight", title="Popular Cryptocurrency Weight Distribution")
#fig_index.show()
"""
Cryptocurrency Log return correlation plot for 2021
"""
# time frame selection: from datetime to timestamp
totimestamp= lambda s: np.int32(time.mktime(datetime.strptime(s,"%d/%m/%Y").timetuple()))
# Log Return
def log_return(series, periods=1):
return np.log(series).diff(periods=periods)
all2021=pd.DataFrame([])
for asset_id, asset_name in zip(info.Asset_ID, info.Asset_Name):
asset=ctrain[ctrain["Asset_ID"]==asset_id].set_index("timestamp")
asset=asset.loc[totimestamp("01/01/2021"):totimestamp("21/09/2021")]
asset=asset.reindex(range(asset.index[0],asset.index[-1]+60,60), method="pad")
lret=log_return(asset.Close.fillna(0))[1:]
all2021=all2021.join(lret,rsuffix=asset_name,how="outer")
plt.imshow(all2021.corr());
plt.yticks(info.Asset_ID, info.Asset_Name.values)
plt.xticks(info.Asset_ID, info.Asset_Name.values,rotation="vertical");
plt.colorbar()
"""
Closing price Trend in 2021
"""
# Impute Missing Time Value
def c_time_sub(asset_id, data=ctrain):
df=data[ctrain["Asset_ID"]==asset_id].set_index("timestamp")
df=df.reindex(range(df.index[0],df.index[-1]+60,60), method="pad")
return df
#1. Subgroup Bitcoin
btc=c_time_sub(asset_id=1)
(btc.index[1:] -btc.index[:-1]).value_counts().head()
btc.head()
#2. Subgroup Ethereum
eth=c_time_sub(asset_id=6)
(btc.index[1:] -btc.index[:-1]).value_counts().head()
eth.head()
#3. Subgroup Cardano
ada=c_time_sub(asset_id=3)
(ada.index[1:] -ada.index[:-1]).value_counts().head()
ada.head()
# Create time interval for 2021
def dur(start,end,data):
df=data.loc[totimestamp(start): totimestamp(end)]
return df
btc2021= dur(start="01/01/2021", end="21/09/2021", data=btc)
eth2021= dur(start="01/01/2021", end="21/09/2021", data=eth)
ada2021= dur(start="01/01/2021", end="21/09/2021", data=ada)
# Plot the Closing Price for BTC, ETH, ADA
f= plt.figure(figsize=(10,12))
def gplot(no , data, price, label, ylabel, color):
ax=f.add_subplot(no)
plt.plot(data[price], label=label, color=color)
plt.legend()
plt.xlabel("Time")
plt.ylabel(ylabel)
return plt
gplot(no=311, data=btc, price="Close" , label="BTC 2021 Overall Performance", ylabel="BTC Closing Price", color="Lightskyblue")
gplot(no=312, data=eth, price="Close" ,label="ETH 2021 Overall Performance", ylabel="ETH Closing Price", color="Coral")
gplot(no=313, data=ada, price="Close" ,label="Cardano 2021 Overall Performance", ylabel="ADA Closing Price", color="khaki")
plt.tight_layout()
plt.show()
# Return rate for BTC,ETH and ADA
f= plt.figure(figsize=(10,12))
gplot(no=311, data=btc, price="Target" , label="BTC 2021 15min Return Residue", ylabel="BTC residual return", color="Aqua")
gplot(no=312, data=eth, price="Target" ,label="ETH 2021 15min Return Residue", ylabel="ETH residual return", color="Pink")
gplot(no=313, data=ada, price="Target" ,label="ADA 2021 15min Return Residue", ylabel="ADA residual return", color="gold")
plt.tight_layout()
plt.show()
def c_chart(data,label):
candlestick = go.Figure(data = [go.Candlestick(x =data.index,
open = data[('Open')],
high = data[('High')],
low = data[('Low')],
close = data[('Close')])])
candlestick.update_xaxes(title_text = 'Time',
rangeslider_visible = True)
candlestick.update_layout(
title = {
            'text': '{:} Candlestick Chart'.format(label),
"y":0.8,
"x":0.5,
'xanchor': 'center',
'yanchor': 'top'})
candlestick.update_yaxes(title_text = 'Price in USD', ticksuffix = '$')
return candlestick
btc1.head()
btc_candle=c_chart(btc1[-90:], label="BTC Price")
btc_candle.show()
eth_candle=c_chart(eth1[100888:100988], label="ETH Price ")
eth_candle.show()
ada_candle=c_chart(ada1[-500:-400], label="ADA Price")
ada_candle.show()
# ARIMA Model
def mini_data(data):
df=data[["Close","Low","High","Open"]]
return df
btc1_mini=mini_data(btc1)
eth1_mini=mini_data(eth1)
ada1_mini=mini_data(ada1)
btc1.head()
# Function to Plot
plt.rcParams["figure.figsize"]=(15,7)
def season_df(data, label):
df=data.resample("M").mean()
seasonal_decompose(df.Close).plot()
print(label)
return plt.show()
season_df(data=btc1_mini, label="BTC Seasonal Decomposition")
season_df(data=eth1_mini, label="ETH Seasonal Decomposition")
#ADA seasonal decomposition
season_df(data=ada1_mini, label="ADA Seasonal Decomposition")
#Box-Cox Transformation
btc_month=btc1_mini.resample("M").mean()
btc_month["close_box"], lmbda=stats.boxcox(btc_month.Close)
#btc_month['close_box'], lmbda = stats.boxcox(btc_month.Close)
qs = range(0, 3)
ps = range(0, 3)
d=1
parameters = product(ps, qs)
parameters_list = list(parameters)
len(parameters_list)
# Model Selection
results = []
best_aic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
try:
model = SARIMAX(btc_month.close_box, order=(param[0], d, param[1])).fit(disp=-1)
except ValueError:
print('bad parameter combination:', param)
continue
aic = model.aic
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param, model.aic])
result_table = pd.DataFrame(results)
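# Editorial sketch (assumes at least one SARIMAX fit above succeeded, so that
# best_model and results are populated): label the result table, rank the
# candidate (p, q) orders by AIC and inspect the winning fit.
result_table.columns = ['parameters', 'aic']
print(result_table.sort_values(by='aic', ascending=True).head())
print(best_model.summary())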
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
        tm.assert_series_equal(result, expected)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright [2020] [Indian Institute of Science, Bangalore]
SPDX-License-Identifier: Apache-2.0
"""
__name__ = "Instantiate a city and dump instantiations as json"
import os, sys
import json
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import time
#Data-processing Functions
from modules.processDemographics import *
from modules.processGeoData import *
# Functions to instantiate individuals to houses, schools, workplaces and community centres
from modules.assignHouses import *
from modules.assignSchools import *
from modules.assignWorkplaces import *
# get the city and target population as inputs
def instantiate(city, targetPopulation, averageStudents, averageWorkforce):
#create directory to store parsed data
path = "data/%s_population%s_students%s"%(city, targetPopulation, averageStudents)
if not os.path.exists(path):
os.mkdir(path)
targetPopulation = int(targetPopulation)
averageStudents = int(averageStudents)
averageWorkforce = float(averageWorkforce)
print("processing data ready ...")
start = time.time()
cityGeojson = "data/base/"+city+"/city.geojson"
cityGeoDF = parse_geospatial_data(cityGeojson)
if "cityProfile.json" in os.listdir("data/base/"+city):
cityProfile = "data/base/"+city+"/cityProfile.json"
ageDistribution, householdDistribution, schoolDistribution, householdSizes, maxWorkplaceDistance = process_city_profile(cityProfile)
demographicsData = pd.read_csv("data/base/"+city+"/demographics.csv")
        housesData = pd.read_csv("data/base/"+city+"/households.csv")
# http://github.com/timestocome
# Attempt to predict nasdaq indexes and find outliers
# http://web.ipac.caltech.edu/staff/fmasci/home/astro_refs/TestForRandomness_RunsTest.pdf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
######################################################################
# load data
########################################################################
# read in data file
data = pd.read_csv('data/nasdaq.csv', parse_dates=True, index_col=0)
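# --- Illustrative sketch (not from the original script) ---
# The runs-test reference cited in the header (Wald-Wolfowitz) can be applied
# to the loaded index data. The column name 'Close' below is an assumption
# about nasdaq.csv; adjust it to the actual price column.
def runs_test(prices):
    # classify each daily return as above (1) or below (0) the median return
    returns = prices.pct_change().dropna()
    signs = (returns > returns.median()).astype(int).values
    n1 = int(signs.sum())
    n2 = len(signs) - n1
    # a new run starts wherever the sign changes
    n_runs = 1 + int(np.count_nonzero(np.diff(signs)))
    # expected number of runs and its variance under the null of randomness
    expected = 2.0 * n1 * n2 / (n1 + n2) + 1.0
    variance = (2.0 * n1 * n2 * (2.0 * n1 * n2 - n1 - n2)
                / ((n1 + n2) ** 2 * (n1 + n2 - 1)))
    z_score = (n_runs - expected) / np.sqrt(variance)
    return n_runs, expected, z_score
# Example usage (column name assumed):
# n_runs, expected_runs, z = runs_test(data['Close'])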
# Author: <NAME>, PhD
#
# Email: <EMAIL>
#
#
# Ref: https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html
# Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.jaccard.html#scipy.spatial.distance.jaccard
# Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html
# Ref: https://sourceforge.net/p/rdkit/mailman/message/24426410/
# Ref: https://python-graph-gallery.com/197-available-color-palettes-with-matplotlib/
# Ref: https://stackoverflow.com/questions/57568311/matplotlib-scatter-issue-with-python-3-x
# Ref: https://www.science-emergence.com/Articles/How-to-create-a-scatter-plot-with-several-colors-in-matplotlib-/
# Ref: https://www.pluralsight.com/guides/choosing-color-palettes
# Ref: https://www.nceas.ucsb.edu/~frazier/RSpatialGuides/colorPaletteCheatsheet.pdf
# Ref: https://htmlcolorcodes.com/color-picker/
# #1179B0
# #F58C30
# #74BB5A
# #BC412C
# #795B9A
# #764A0C
# #D37DB5
# #7A7A7A
# #B8C449
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import pandas as pd
import rdkit
from rdkit import Chem
from scaffold_keys import smiles2bmscaffold, smiles2scaffoldkey, sk_distance
#import matplotlib
#matplotlib.use('agg')
#import matplotlib.pyplot as plt
#import seaborn as sns
import numpy as np
from sklearn.manifold import TSNE
import math
from knn import get_mol, get_fingerprint
from hilbertcurve.hilbertcurve import HilbertCurve  # used by altered_tsne() for bucket-to-coordinate mapping
def tr_expand_coords (df, source_col, id_col, delimiter):
df_orig = df
df = df[source_col].str.split(delimiter, expand = True)
nr_cols = len (df.columns)
columns = []
for i in range (nr_cols):
columns.append('Dim_' + str(i + 1))
df.columns = columns
df = df.astype('int32')
#df[id_col] = df_orig[id_col]
df = pd.concat([df_orig, df], axis = 1)
return (df)
def closest_scaffold (sk_struct, df_space, idx, nr_structures):
print ('[*] Processing structure %d out of %d .' % (idx, nr_structures))
df = df_space
df['sk_struct'] = sk_struct
df['sk_distance'] = df.apply (lambda x: sk_distance (x['sk_struct'], x['scaffold_key']), axis = 1)
df = df.sort_values (['sk_distance'])
closest_scaffold_order = df['order'].values[0]
return (closest_scaffold_order)
def get_hilbert_coordinates (hc, bucket_id):
# print (bucket_id)
coordinates = []
coordinates = hc.coordinates_from_distance(bucket_id - 1)
coordinate_str = ''
nr_dim = len (coordinates)
for i in range(nr_dim):
coordinate_str += str(coordinates[i]) + ';'
coordinate_str = coordinate_str[:-1]
return (coordinate_str)
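# --- Assumed helper (sketch) ---
# get_bucket_id() is referenced in altered_tsne() but not defined in this
# excerpt. The mapping below is an assumed implementation: it spreads the
# 1-based scaffold rank ('closest_order') evenly over the Hilbert-curve
# buckets using the bucket_size computed in altered_tsne(), returning a
# 1-based bucket id (get_hilbert_coordinates() subtracts 1 again).
def get_bucket_id (closest_order, bucket_size):
    return (int(math.floor((closest_order - 1) / bucket_size)) + 1)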
def altered_tsne (df_structures, str_colname, id_colname, df_space, hc_order, n_dim):
hilbert_curve = HilbertCurve(hc_order, n_dim)
df_structures = df_structures[[id_colname, str_colname]].copy()
df_structures['bms'] = df_structures.apply (lambda x: smiles2bmscaffold (x[str_colname]), axis = 1)
    # filter out invalid or nonsense/empty scaffolds:
df_structures = df_structures[df_structures['bms'] != 'NA']
#df_space['jk'] = 1
df_space = df_space.rename (columns = {
'structure': 'ref_scaffold_smiles'
})
nr_scaffolds = df_space.shape[0]
bucket_nr = math.pow(math.pow(2, hc_order), n_dim)
bucket_size = float(nr_scaffolds / (bucket_nr - 1))
#df_structures['jk'] = 1
df_structures['sk_struct'] = df_structures.apply (lambda x: smiles2scaffoldkey(x['bms'], trailing_inchikey = False), axis = 1)
print ('nr_scaffolds: %d, bucket_nr: %d, bucket_size %f' % (nr_scaffolds, bucket_nr, bucket_size))
print ('[*] Number of input structures: %d' % (df_structures.shape[0]))
df_structures = df_structures[df_structures['sk_struct'] != 'NA']
print ('[*] Number of structures for which scaffold_key was generated: %d' % (df_structures.shape[0]))
#df3 = df_space.merge (df_structures, on = 'jk', how = 'inner')
#df = df3.copy()
#df = df.reset_index (drop = True)
df_structures = df_structures.reset_index()
df_structures['idx'] = df_structures.index + 1
nr_structures = df_structures.shape[0]
df_structures['closest_order'] = df_structures.apply (lambda x: closest_scaffold (x['sk_struct'], df_space, x['idx'], nr_structures), axis = 1)
df_structures['bucket_id'] = df_structures.apply (lambda x: get_bucket_id(x['closest_order'], bucket_size), axis = 1)
df_structures['embedded_hs_coordinates'] = df_structures.apply (lambda x: get_hilbert_coordinates(hilbert_curve, x['bucket_id']), axis = 1)
df = df_structures
#df = df_structures.merge (df_space, left_on = 'closest_order', right_on = 'order', how = 'inner')
# ignore ->
"""
df = df.sort_values(['sk_distance'])
df = df.groupby([str_colname], as_index = False).agg ({
id_colname: 'first',
'hs_coordinates': 'first',
'scaffold_id': 'first',
'ref_scaffold_smiles': 'first'
})
"""
# <- ignore
df = tr_expand_coords (df, 'embedded_hs_coordinates', id_colname, delimiter = ';')
return (df)
def to_bitstring (fp):
return (fp.ToBitString())
def generate_fp (df):
df['bms_mol'] = df.apply(lambda x: get_mol (x['structure']), axis = 1)
df['bms_fp'] = df.apply (lambda x: get_fingerprint(x['bms_mol'], radius = 3, fplength = 2048), axis = 1)
df['bms_fp_str'] = df.apply (lambda x: to_bitstring(x['bms_fp']), axis = 1)
return (df)
def get_fp_np_array (fps):
first = True
fpl = 0
all_fp = []
fp_array = []
for i in range(len(fps)):
fp_array = []
fp = fps[i]
if first:
fpl = len(fp)
first = False
else:
if len(fp) != fpl:
print ('[ERROR] Fingerprint length mismatch. Terminating ...')
sys.exit (-1)
for j in range(len(fp)):
fp_array.append(int(fp[j]))
all_fp.append(fp_array)
all_fp = np.array(all_fp)
return (all_fp)
def embed_ref_scaffolds (df_ref_bms, tsne_model):
df_ref_bms = generate_fp (df_ref_bms)
    print (df_ref_bms.head())
X = list(df_ref_bms['bms_fp_str'])
X = get_fp_np_array (X)
#print (X)
X_embedded = tsne_model.fit_transform(X)
print (X_embedded)
ids = list(df_ref_bms['scaffold_id'])
    df_embedded = pd.DataFrame ({'scaffold_id': ids, 'Dim_1': X_embedded[:,0], 'Dim_2': X_embedded[:,1]})
import pandas as pd
'''
Data pipeline for ingestion of 311-data datasets
General sections:
1. ACQUIRE: Download data from source
2. CLEAN: Perform data cleaning and organization before entering into SQL
3. INGEST: Add data set to SQL database
These workflows can be abstracted/encapsulated in order to better generalize
across tasks if necessary.
'''
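# --- Illustrative sketch (not part of the original pipeline) ---
# The docstring above notes that the three stages could be encapsulated so the
# same workflow generalizes across data sets. One minimal way to do that is
# sketched below; the function names and the SQL connection object are
# assumptions, not project conventions.
def acquire_311(path):
    '''1. ACQUIRE: load a raw 311 export (TSV) into a DataFrame'''
    return pd.read_table(path, sep='\t')
def clean_311(df):
    '''2. CLEAN: normalize dtypes before entering into SQL'''
    df['CreatedDate'] = pd.to_datetime(df['CreatedDate'])
    return df
def ingest_311(df, conn, table='requests'):
    '''3. INGEST: append the cleaned frame to a SQL table'''
    df.to_sql(table, conn, if_exists='append', index=False)
# Example (the connection object `conn` is hypothetical):
# ingest_311(clean_311(acquire_311('311data2019.tsv')), conn)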
### 1. ACQUIRE ###
# Code for automated data download goes here
### 2. CLEAN ###
# Load data file from TSV/CSV
### xNOTE: Can encapsulate this workflow and reapply for each data set
dfb = pd.read_table('311data2019.tsv',sep='\t') # For now assume data in this folder
# Format dates as datetime (Time intensive)
dfb['CreatedDate'] = pd.to_datetime(dfb['CreatedDate'])
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:bandit_38]
# language: python
# name: conda-env-bandit_38-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
import datetime
import xarray as xr
import pandas as pd
import numpy as np
from Bandit.bandit_multi_locations import read_file
from pyPRMS.ParamDb import ParamDb
# %%
hw_data_dir = '/Users/pnorton/Projects/National_Hydrology_Model/datasets/paramdb_v11/headwaters'
paramdb_dir = '/Users/pnorton/Projects/National_Hydrology_Model/datasets/paramdb_v11/paramdb_v11_gridmet_CONUS'
falcone_dir = '/Users/pnorton/GIS/gagesII_additiona_data/basinchar_and_report_sept_2011'
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/datasets/bandit/poi_data'
nwis_file = f'{workdir}/*_pois.nc'
st_date = datetime.datetime(1980, 1, 1)
en_date = datetime.datetime(2019, 12, 31)
# %%
# xdf = xr.open_dataset(nwis_file, decode_cf=True, engine='netcdf4')
poi_xdf = xr.open_mfdataset(nwis_file, decode_cf=True, combine='nested', concat_dim='poi_id', engine='netcdf4')
poi_xdf
# %%
poi_xdf = xr.open_mfdataset(nwis_file, decode_cf=True, combine='nested', concat_dim='poi_id', engine='netcdf4')
poi_df = poi_xdf[['poi_name', 'latitude', 'longitude', 'drainage_area', 'drainage_area_contrib']].to_pandas()
poi_df.rename(columns={'drainage_area': 'da_obs', 'drainage_area_contrib': 'da_contrib_obs'}, inplace=True)
# Compute the correction factor for obs values
poi_df['da_ratio_obs'] = poi_df['da_contrib_obs'] / poi_df['da_obs']
# Any NaN values should default to a correction factor of 1.0
poi_df['da_ratio_obs'].fillna(value=1.0, inplace=True)
# Sometimes the full da and contributing da are swapped
poi_df['da_ratio_obs'] = np.where(poi_df['da_ratio_obs'] > 1.0, 1.0 / poi_df['da_ratio_obs'], poi_df['da_ratio_obs'])
poi_df['da_actual_obs'] = poi_df[['da_obs', 'da_contrib_obs']].min(axis=1)
poi_df.head()
# %%
df1 = poi_xdf['discharge'].loc[:, st_date:en_date].to_pandas()
# Get POIs which have no missing values in POR
missing_obs_cnt_df = df1.isnull().sum(axis=1)
missing_obs_cnt_df.name = 'missing_obs'
poi_contiguous = missing_obs_cnt_df[missing_obs_cnt_df == 0].index.tolist()
print(f'POIs with contiguous observations: {len(poi_contiguous)}')
# %%
missing_obs_cnt_df.head()
# %%
# da_nwis = xdf['drainage_area'].to_pandas()
# print(da_nwis.head())
# Convert area to acres
# da_nwis *= 640.
# print(da_nwis.head())
# %%
# da_stuff = xdf[['drainage_area', 'drainage_area_contrib']].to_pandas()
# %%
# da_stuff.head(50)
# %%
missing_obs_cnt_df['02157490']
# %% [markdown]
# ### Load headwater segments
# %%
segs_by_hw = read_file(f'{hw_data_dir}/hw_segs.csv')
segs_list = []
seg_to_hw = {}
for kk, vv in segs_by_hw.items():
# kk is the headwater number
# segs_list.append(kk)
for xx in vv:
seg_to_hw[xx] = kk
segs_list.append(xx)
# Get set of unique segment ID
segs_set = set(segs_list)
print(f'Total number of segments: {len(segs_list)}')
print(f'Unique number of segments: {len(segs_set)}')
# %%
# %% [markdown]
# ### Load parameter database
# %%
pdb = ParamDb(paramdb_dir, verbose=True, verify=True)
poi_to_seg = pdb.parameters.poi_to_seg
seg_to_poi = {vv: kk for kk, vv in poi_to_seg.items()}
poi_gage_id = pdb.parameters.get('poi_gage_id').data.tolist()
seg_cum_area = pdb.parameters.get_dataframe('seg_cum_area')
# Create list of POIs in headwater domains
hw_poi_list = []
for xx in segs_set:
try:
hw_poi_list.append(seg_to_poi[xx])
except KeyError:
pass
# print(f'{xx} has no POI')
print(f'Number of POIs in headwater areas: {len(hw_poi_list)}')
# Create list of POIs with the NHM drainage area
poi_areas = {'poi_id': [],
'hw_id': [],
'da_seg_cum': []}
for xx in poi_gage_id:
try:
if xx in hw_poi_list:
# Convert NHM acres to sq. mi.
poi_areas['poi_id'].append(xx)
poi_areas['hw_id'].append(seg_to_hw[poi_to_seg[xx]])
if poi_to_seg[xx] > 0:
poi_areas['da_seg_cum'].append(seg_cum_area.loc[poi_to_seg[xx]].values[0] * 0.0015625)
else:
# I think this only happened in NHM v1.0
poi_areas['da_seg_cum'].append(0)
except KeyError:
print(f'{xx} has no POI')
# Create a dataframe of NHM drainage by POI
poi_areas_df = pd.DataFrame.from_dict(poi_areas)
poi_areas_df.set_index('poi_id', inplace=True)
poi_areas_df.head()
# %%
# Merge POI information with seg_cum_area
poi_info_df = poi_df
poi_info_df = pd.merge(poi_info_df, poi_areas_df, how='inner', left_index=True, right_index=True)
poi_info_df = pd.merge(poi_info_df, missing_obs_cnt_df, how='inner', left_index=True, right_index=True)
poi_info_df.head()
# Add drainage area ratio between nwis/hydat and NHM
poi_info_df['da_ratio'] = poi_info_df['da_actual_obs'] / poi_info_df['da_seg_cum']
poi_info_df['da_ratio'] = np.where(poi_info_df['da_ratio'] > 1.0, 1.0 / poi_info_df['da_ratio'], poi_info_df['da_ratio'])
# %%
poi_info_df.info()
# %%
# %%
# %% [markdown]
# ### Get POIs in headwaters that have contiguous records
# %%
hw_contig = set(hw_poi_list) & set(poi_contiguous)
print(f'Number of contiguous POIs in headwaters: {len(hw_contig)}')
# %%
# %%
# %% [markdown]
# ### Load Falcone information
# %%
col_names = ['STAID', 'CLASS', 'HYDRO_DISTURB_INDX']
col_types = [str, str, int]
cols = dict(zip(col_names, col_types))
falcone_df = pd.read_excel(open(f'{falcone_dir}/gagesII_sept30_2011_conterm.xlsx', 'rb'), sheet_name='Bas_Classif',
usecols=[0, 1], dtype=cols)
falcone_df.rename(columns={'STAID': 'poi_id', 'CLASS': 'falcone_class'}, inplace=True)
falcone_df.set_index('poi_id', inplace=True)
falcone_df.info()
falcone_ids = falcone_df.index.tolist()
falcone_ref = falcone_df[falcone_df['falcone_class'] == 'Ref']
falcone_ref_ids = falcone_ref.index.tolist()
# %%
# %%
# Headwater POIs that are in Falcone dataset
hw_contig_falcone = set(falcone_ids) & hw_contig
print(f'Number of headwater POIs also in Falcone: {len(hw_contig_falcone)}')
hw_contig_ref = set(falcone_ref_ids) & hw_contig
print(f'Number of headwater POIs that are Falcone reference gages: {len(hw_contig_ref)}')
# %%
len(falcone_ref_ids)
# %%
poi_info_df = pd.merge(poi_info_df, falcone_df, how='inner', left_index=True, right_index=True)
poi_info_df.head()
# %%
poi_info_df.info()
# %%
# %%
# Remove POIs which are missing more than a given number of obs
df_reduce_1 = poi_info_df[poi_info_df['missing_obs'] < 3650]
# %%
df_reduce_1.info()
# %%
# Remove POIs that lack a DA
df_reduce_2 = df_reduce_1[df_reduce_1['da_actual_obs'].notna()]
# %%
df_reduce_2.info()
# %%
df_reduce_3 = df_reduce_2[df_reduce_2['da_ratio'] >= 0.9]
# %%
df_reduce_3.info()
# %%
df_reduce_3.head()
# %%
df_reduce_3.to_csv('/Users/pnorton/tmp/nhm_v11_hwobs_1.csv', sep='\t', index=True)
# %%
df_reduce_1.loc['02160105']
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
hw_id_set = set(df_reduce_3['hw_id'].tolist())
# %%
len(hw_id_set)
# %%
# poi_areas_df = pd.DataFrame.from_dict(poi_areas)
# poi_areas_df.set_index('poi_id', inplace=True)
hwid = {'hw_id': list(hw_id_set)}
hwid_df = pd.DataFrame.from_dict(hwid)
hwid_df['use_obs'] = 1
hwid_df.set_index('hw_id', inplace=True)
hwid_df.head()
# %%
hwid_df.to_csv('/Users/pnorton/tmp/nhm_v11_hwobsid_1.csv', sep='\t', index=True)
# %%
hw2_df = pd.read_csv(f'{hw_data_dir}/hw_hrus.csv')
"""Code used for notebooks and data exploration on
https://github.com/oscovida/oscovida"""
import datetime
import math
import os
import pytz
import time
import joblib
import numpy as np
import pandas as pd
import IPython.display
from typing import Tuple, Union
# choose font - can be deactivated
from matplotlib import rcParams
from oscovida.data_sources import base_url, hungary_data, jhu_population_url, rki_data, rki_population_url, rki_population_backup_file
from oscovida.plotting_helpers import align_twinx_ticks, cut_dates, has_twin, limit_to_smoothed, uncertain_tail
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Tahoma', 'DejaVu Sans', 'Lucida Grande', 'Verdana']
rcParams['svg.fonttype'] = 'none'
# need many figures for index.ipynb and germany.ipynb
rcParams['figure.max_open_warning'] = 50
from matplotlib.collections import LineCollection
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.dates import DateFormatter, date2num, MONDAY, WeekdayLocator
from matplotlib.ticker import ScalarFormatter, FuncFormatter, FixedLocator
from bisect import bisect
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# suppress warning
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
LW = 3 # line width
# set up joblib memory to avoid re-fetching files
joblib_location = "./cachedir"
joblib_memory = joblib.Memory(joblib_location, verbose=0)
def compute_binder_link(notebook_name):
"""Given a string """
root_url = "https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/"
return root_url + notebook_name
def display_binder_link(notebook_name):
url = compute_binder_link(notebook_name)
# print(f"url is {url}")
IPython.display.display(
IPython.display.Markdown(f'[Execute this notebook with Binder]({url})'))
def clear_cache():
"""Need to run this before new data for the day is created"""
joblib_memory.clear()
def double_time_exponential(q2_div_q1, t2_minus_t1=None):
""" See https://en.wikipedia.org/wiki/Doubling_time"""
if t2_minus_t1 is None:
t2_minus_t1 = np.ones(q2_div_q1.shape)
return t2_minus_t1 * np.log(2) / np.log(q2_div_q1)
def report_download(url, df):
print(f"Downloaded data: last data point {df.columns[-1]} from {url}")
@joblib_memory.cache
def fetch_deaths_last_execution():
"""Use to remember at what time and date the last set of deaths was downloaded.
A bit of a hack as we didn't know how to get this out of joblib.
"""
return datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
@joblib_memory.cache
def fetch_cases_last_execution():
"""See fetch_deaths_last_execution"""
return datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
@joblib_memory.cache
def fetch_deaths():
"""Download deaths from Johns Hopkins data repository"""
url = os.path.join(base_url, "time_series_covid19_" + "deaths" + "_global.csv")
df = pd.read_csv(url, index_col=1)
report_download(url, df)
fetch_deaths_last_execution()
return df
@joblib_memory.cache
def fetch_deaths_US():
"""Download deaths for US states from Johns Hopkins data repository"""
url = os.path.join(base_url, "time_series_covid19_" + "deaths" + "_US.csv")
df = pd.read_csv(url, index_col=1)
report_download(url, df)
# fetch_deaths_last_execution_()
return df
@joblib_memory.cache
def fetch_cases():
"""Download cases from Johns Hopkins data repository"""
url = os.path.join(base_url, "time_series_covid19_" + "confirmed" + "_global.csv")
df = pd.read_csv(url, index_col=1)
report_download(url, df)
fetch_cases_last_execution()
return df
@joblib_memory.cache
def fetch_cases_US():
"""Download cases for US status from Johns Hopkins data repository"""
url = os.path.join(base_url, "time_series_covid19_" + "confirmed" + "_US.csv")
df = pd.read_csv(url, index_col=1)
report_download(url, df)
fetch_cases_last_execution()
return df
def get_country_data_johns_hopkins(country: str,
region: str = None, subregion: str = None) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Given a country name, return deaths and cases as a tuple of
pandas time series. Works for all (?) countries in the world, or at least
those in the Johns Hopkins data set. All rows should contain a datetime
index and a value.
"""
deaths = fetch_deaths()
cases = fetch_cases()
assert country in deaths.index, f"{country} not in available countries. These are {sorted(deaths.index)}"
# Some countries report sub areas (i.e. multiple rows per country) such as China, France, United Kingdom
# Denmark. In that case, we sum over all regions (by summing over the relevant rows).
tmp = deaths.loc[country]
if len(tmp.shape) == 1: # most countries (Germany, Italy, ...)
d = tmp
elif len(tmp.shape) == 2: # China, France, United Kingdom, ...
d = tmp.drop(columns=['Province/State']).sum()
d.rename("deaths", inplace=True)
else:
raise ValueError("Unknown data set structure for deaths {country}:", tmp)
tmp = cases.loc[country]
if len(tmp.shape) == 1:
c = tmp
elif len(tmp.shape) == 2:
c = tmp.drop(columns=['Province/State']).sum()
c.rename("cases", inplace=True)
else:
raise ValueError("Unknown data set structure for cases {country}:", tmp)
# make date string into timeindex
d.index = pd.to_datetime(d.index, errors="coerce", format="%m/%d/%y")
c.index = pd.to_datetime(c.index, errors="coerce", format="%m/%d/%y")
# drop all rows that don't have data
# sanity check: how many do we drop?
if c.index.isnull().sum() > 3:
print(f"about to drop {c.index.isnull().sum()} entries due to NaT in index", c)
c = c[c.index.notnull()]
if d.index.isnull().sum() > 3:
print(f"about to drop {d.index.isnull().sum()} entries due to NaT in index", d)
d = d[d.index.notnull()]
# check there are no NaN is in the data
assert c.isnull().sum() == 0, f"{c.isnull().sum()} NaNs in {c}"
assert d.isnull().sum() == 0, f"{d.isnull().sum()} NaNs in {d}"
# label data
c.name = country + " cases"
d.name = country + " deaths"
return c, d
def get_US_region_list():
"""return list of strings with US state names"""
deaths = fetch_deaths_US()
return list(deaths.groupby("Province_State").sum().index)
def get_region_US(state, county=None, debug=False):
"""Given a US state name and county, return deaths and cases as a tuple of pandas time
series. (Johns Hopkins data set)
If country is None, then sum over all counties in that state (i.e. return
the numbers for the state.)
"""
if not county is None:
raise NotImplementedError("Can only process US states (no counties)")
deaths = fetch_deaths_US()
cases = fetch_cases_US()
assert state in deaths['Province_State'].values, \
f"{state} not in available states. These are {sorted(set(deaths['Province_State']))}"
if county is None:
tmpd = deaths.groupby('Province_State').sum()
d = tmpd.loc[state]
tmpc = cases.groupby('Province_State').sum()
c = tmpc.loc[state]
else:
raise NotImplementedError("Can't do counties yet.")
# Some countries report sub areas (i.e. multiple rows per country) such as China, France, United Kingdom
# Denmark. In that case, we sum over all regions.
# make date string into timeindex
d.index = pd.to_datetime(d.index, errors="coerce", format="%m/%d/%y")
c.index = pd.to_datetime(c.index, errors="coerce", format="%m/%d/%y")
# drop all rows that don't have data
# sanity check: how many do we drop?
if c.index.isnull().sum() > 3:
if debug:
print(f"about to drop {c.index.isnull().sum()} entries due to NaT in index", c)
c = c[c.index.notnull()]
if d.index.isnull().sum() > 3:
if debug:
print(f"about to drop {d.index.isnull().sum()} entries due to NaT in index", d)
d = d[d.index.notnull()]
# check there are no NaN is in the data
assert c.isnull().sum() == 0, f"{c.isnull().sum()} NaNs in {c}"
assert d.isnull().sum() == 0, f"{d.isnull().sum()} NaNs in {d}"
# label data
country = f"US-{state}"
c.name = country + " cases"
d.name = country + " deaths"
return c, d
def compose_dataframe_summary(cases, deaths):
"""Used in per-country template to show data table.
Could be extended.
Expects series of cases and deaths (time-aligned), combines those in DataFrame and returns it
"""
df = pd.DataFrame()
df["total cases"] = cases
df["daily new cases"] = cases.diff()
if deaths is not None:
df["total deaths"] = deaths
df["daily new deaths"] = deaths.diff()
# drop first row with nan -> otherwise ints are shows as float in table
df = df.dropna().astype(int)
# change index: latest numbers shown first
df = df[::-1]
return df
@joblib_memory.cache
def fetch_data_germany_last_execution():
return datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
@joblib_memory.cache
def fetch_data_germany(include_last_day=True) -> pd.DataFrame:
"""Fetch data for Germany from Robert Koch institute and return as a pandas
DataFrame with the `Meldedatum` as the index.
Data source is https://npgeo-corona-npgeo-de.hub.arcgis.com . The text on that
webpage implies that the data comes from the Robert Koch Institute.
As an option (`include_last_day`), we can omit the last day with data from
the retrieved data sets (see reasoning below in source), as the data is
commonly update one day later with more accurate (and typically higher)
numbers.
"""
# outdated: datasource = "https://opendata.arcgis.com/datasets/dd4580c810204019a7b8eb3e0b329dd6_0.csv"
datasource = rki_data
t0 = time.time()
print(f"Please be patient - downloading data from {datasource} ...")
germany = pd.read_csv(datasource)
delta_t = time.time() - t0
print(f"Completed downloading {len(germany)} rows in {delta_t:.1f} seconds.")
## create new column 'landkreis' and get rid of "SK " and "LK " for this
## - this is too simplistic. We have fields like "Region Hannover"
# germany['landkreis'] = germany['Landkreis'].apply(lambda s: s[3:])
# (at least) the last data from the Robert-Koch-Institute (RKI) seems not to be
# fully reported the day after. For example, on 3 April, the number of cases
# from RKI is well below what is expected. Example:
#
# From RKI (as of evening of 2020-04-03:)
# 2020-03-29 62653
# 2020-03-30 66692
# 2020-03-31 72333
# 2020-04-01 77464
# 2020-04-02 79625
#
# From Johns Hopkins (as of evening of 2020-04-03:):
# 2020-03-29 62095
# 2020-03-30 66885
# 2020-03-31 71808
# 2020-04-01 77872
# 2020-04-02 84794
#
# So we must assume that the RKI data will be corrected later; maybe the next day.
#
# To make our plots not inaccurate, we'll remove the last data point from the RKI data:
g2 = germany.set_index(pd.to_datetime(germany['Meldedatum']))
g2.index.name = 'date'
# get rid of last day in data if desired
if include_last_day == False:
last_day = g2.index.max()
sel = g2.index == last_day
cleaned = g2.drop(g2[sel].index, inplace=False)
else:
cleaned = g2
fetch_data_germany_last_execution()
return cleaned
def pad_cumulative_series_to_yesterday(series):
"""Given a time series with date as index and cumulative cases/deaths as values:
- if the last date in the index is older than yesterday, then
- add that date
- resample the series with a daily interval, using padding with last known value
- and return.
Required for <NAME> Data, where only a new data point is provided if
the numbers change, but the plotting algorithms need to know that there is
no change. Without this padding, the data set looks old as the last plotted
data point is the last one for which data is provided.
"""
now = datetime.datetime.now()
rki_tz = pytz.timezone('Europe/Berlin')
now_tz = datetime.datetime.now(rki_tz)
# remove time zone information from datetime, so we can compare against
# datatime dates from get_country_data which has no timezone information
# attached.
now = now.replace(tzinfo=None)
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
yesterday = today - pd.Timedelta("1D")
last = series.index.max()
if last < yesterday:
# repeat last data point with index for yesterday
series[yesterday] = series[last]
series2 = series.resample("1D").fillna(method="ffill")
return series2
else:
return series
def germany_get_region(state=None, landkreis=None, pad2yesterday=False):
""" Returns cases and deaths time series for Germany, and a label for the state/kreis.
If state is given, return sum of cases (as function of time) in that state (state=Bundesland)
If Landkreis is given, return data from just that Landkreis.
Landkreis seems unique, so there is no need to provide state and Landkreis.
[Should tidy up names here; maybe go to region and subregion in the function argument name, and
translate later.]
"""
germany = fetch_data_germany()
"""Returns two time series: (cases, deaths)"""
assert state or landkreis, "Need to provide a value for state or landkreis"
if state and landkreis:
raise NotImplementedError("Try to use 'None' for the state.")
# TODO: We need to check if this is important.
if state:
if not state in germany['Bundesland'].values:
raise Exception(
f"{state} not in available German states. These are "
f"{sorted(germany['Bundesland'].drop_duplicates())}"
)
land = germany[germany['Bundesland'] == state]
land = land.set_index(pd.to_datetime(land['Meldedatum']))
land.index.name = 'date'
land.sort_index(inplace=True)
# group over multiple rows for the same date
# (this will also group over the different landkreise in the state)
cases = land["AnzahlFall"].groupby('date').agg('sum').cumsum()
region_label = f'Germany-{state}'
cases.name = region_label + " cases"
# group over all multiple entries per day
deaths = land["AnzahlTodesfall"].groupby('date').agg('sum').cumsum()
deaths.name = region_label + " deaths"
if pad2yesterday:
deaths = pad_cumulative_series_to_yesterday(deaths)
cases = pad_cumulative_series_to_yesterday(cases)
return cases, deaths
if landkreis:
assert landkreis in germany['Landkreis'].values, \
f"{landkreis} not in available German districts. These are {sorted(germany['Landkreis'].drop_duplicates())}"
lk = germany[germany["Landkreis"] == landkreis]
lk.index = pd.to_datetime(lk['Meldedatum'])
lk.index.name = 'date'
lk = lk.sort_index()
cases = lk["AnzahlFall"].groupby('date').agg('sum').cumsum()
region_label = f'Germany-{landkreis}'
cases.name = region_label + ' cases'
deaths = lk["AnzahlTodesfall"].groupby('date').agg('sum').cumsum()
deaths.name = region_label + ' deaths'
if pad2yesterday:
deaths = pad_cumulative_series_to_yesterday(deaths)
cases = pad_cumulative_series_to_yesterday(cases)
return cases, deaths
@joblib_memory.cache
def fetch_csv_data_from_url(source) -> pd.DataFrame:
"""Given a URL, fetch the csv using pandas. Put into separate function (from germany_get_population)
to avoid repeated download of file (for better performance)."""
data = pd.read_csv(source)
return data # type: ignore
def _germany_get_population_backup_data_raw() -> pd.DataFrame:
"""Function is not meant to be used directly.
Use germany_get_population() instead (which will call this function if required).
"""
# where is the backup file?
rki_population_backup_path = os.path.join(os.path.split(__file__)[0],
rki_population_backup_file)
population = pd.read_csv(rki_population_backup_path)
return population
@joblib_memory.cache
def germany_get_population() -> pd.DataFrame:
"""The function's behavior duplicates the one for `germany_get_region()` one."""
source = rki_population_url
population = fetch_csv_data_from_url(source)
try:
population = (
population
.set_index('county')
)
except KeyError as exception:
print(f"Couldn't retrieve population data from RKI ({rki_population_url}). "
"Using backup data from August 2020 instead "
"(https://github.com/oscovida/oscovida/blob/master/oscovida/backup_data/RKI_Corona_Landkreise.csv)"
)
population = _germany_get_population_backup_data_raw()
population = (
population
.set_index('county')
)
population = population.rename(columns={"EWZ": "population"})
# Some tidy up of the data:
# see https://github.com/oscovida/oscovida/issues/210
# try to remove this if-clause and see if tests fail:
if "LK Saar-Pfalz-Kreis" in population.index:
population.loc['LK Saarpfalz-Kreis'] = population.loc['LK Saar-Pfalz-Kreis']
population = population.drop('LK Saar-Pfalz-Kreis')
# 27 July 2021 - test fail because name is "Städteregion Aachen'" in actual data (
# i.e. 'StädteRegion' versus 'Städteregion' Aachen)
# see https://github.com/oscovida/oscovida/runs/3170956651?check_suite_focus=true#step:6:651
# if "StädteRegion Aachen" in population.index:
# population.loc['Städteregion Aachen'] = population.loc['StädteRegion Aachen']
# population = population.drop('StädteRegion Aachen')
#
# Update: 19 November 2021
# This behaviour now seems reversed, ie Aachen has the same name in population and RKI data now again.
# We leave the code above commented out, in case the problem
# comes back again.
return population # type: ignore
@joblib_memory.cache
def get_population() -> pd.DataFrame:
"""Returns a DataFrame with population data"""
source = jhu_population_url
population = fetch_csv_data_from_url(source)
# Only include population of entire countries by excluding rows that belong
# to a province or state
population = population[population['Province_State'].isnull()]
population = population[population["Population"] != 0]
population = (
population
.groupby('Country_Region')
.sum() # Here we sum over individual regions in a country to get the total country population
.rename(columns={"Population": "population"}) # Rename Population to population
)
return population # type: ignore
def population(country: str,
region: str = None, subregion: str = None) -> Union[int, None]:
"""
Returns an `int` which corresponds to the population.
Only supports JHU countries and Germany Landkreise so far.
Example:
$> population(country="Germany", subregion="LK Pinneberg")
316103
$> population(country="France")
65273512
"""
df = fetch_csv_data_from_url(jhu_population_url)\
.rename(columns={"Population": "population",
"Country_Region": "country",
"Province_State": "region",
"Admin2": "subregion"})
df = df[df['population'].notnull()]
if country in df.country.values:
if region is None and subregion is None:
# use JHU data
return int(df[(df['country'] == country)
& (df['region'].isnull())
& (df['subregion'].isnull())
].population)
elif region and subregion:
raise NotImplementedError("Cannot use both region and subregion")
elif country.casefold() == 'germany':
if region or subregion:
df = germany_get_population()
# XXX Aachen is broken:
# if subregion == "StädteRegion Aachen":
# subregion = "Städteregion Aachen"
if region in df['BL'].values:
return int(df[df['BL'] == region].population.sum())
elif subregion in df.index:
return int(df.population[subregion])
elif region in df.index: # silently try to use as subregion
return int(df.population[region])
else:
raise NotImplementedError(f"region={region} subregion={subregion} in neither in available German Lands nor in Landkreises. " \
f"These are {', '.join(sorted(df['BL'].drop_duplicates()))} for Lands and " \
f"{', '.join(sorted(df.index))} for Landkreises.")
else:
if region or subregion:
if region in df['region'].values:
combined_key = f"{region}, {country}"
if combined_key in df['Combined_Key'].values: # the total population of the region is known
return int(df[df['Combined_Key'] == combined_key].population)
else: # there's no total population in the dataset, we have to sum up on our own
return int(df[df['region'] == region].population.sum())
            elif subregion in df['subregion'].values:
                return int(df[df['subregion'] == subregion].population.sum())
            elif region in df['subregion'].values:  # silently try to use as subregion
                return int(df[df['subregion'] == region].population.sum())
else:
return
else:
return
@joblib_memory.cache
def get_incidence_rates_countries(period=14):
cases = fetch_cases()
deaths = fetch_deaths()
cases = cases.groupby(cases.index).sum().astype(int)
deaths = deaths.groupby(deaths.index).sum().astype(int)
# Sanity checks that the column format is as expected
assert all(cases.columns == deaths.columns)
assert all(cases.columns[:2] == ["Lat", "Long"])
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
fortnight_ago = yesterday - datetime.timedelta(days=period+1)
periods = (fortnight_ago < pd.to_datetime(cases.columns[2:])) & (
        pd.to_datetime(cases.columns[2:]) < yesterday)
from os.path import join as opj
import numpy as np
from pandas import read_sql_query, concat
import matplotlib.pylab as plt
import seaborn as sns
from configs.nucleus_style_defaults import Interrater as ir, NucleusCategories as ncg
from interrater.interrater_utils import _maybe_mkdir, \
_connect_to_anchor_db, get_roc_and_auroc_for_who, \
get_precision_recall_for_who
CLS = {
'main': [j for j in ncg.main_categs if j != 'AMBIGUOUS'],
'super': [j for j in ncg.super_categs if j != 'AMBIGUOUS'],
}
def _get_accuracy_stats(
dbcon, whoistruth: str, unbiased_is_truth: bool,
clsgroup: str, evalset=None):
evquery = '' if evalset is None else f'AND "evalset" = "{evalset}"'
# read accuracy stats
classes = ['detection', 'classification'] + CLS[clsgroup]
accuracy = read_sql_query(f"""
SELECT *
FROM "participant_AccuracyStats_{clsgroup}ClassGroup"
WHERE "whoistruth" = "{whoistruth}"
AND "unbiased_is_truth" = {0 + unbiased_is_truth}
AND "total" > 1
{evquery}
AND "participant" IN ({ir._get_sqlite_usrstr_for_who('All')})
AND "class" IN ({ir._get_sqlitestr_for_list(classes)})
;""", dbcon)
return classes, accuracy
def plot_participant_accuracy_stats(
dbcon, savedir: str, unbiased_is_truth: bool, whoistruth: str,
evalset: str, clsgroup: str):
""""""
truthstr = f'{"UNBIASED_" if unbiased_is_truth else ""}' \
f'{whoistruth}_AreTruth' # noqa
where = opj(savedir, truthstr)
_maybe_mkdir(where)
_maybe_mkdir(opj(where, 'csv'))
_maybe_mkdir(opj(where, 'plots'))
classes, accuracy = _get_accuracy_stats(
dbcon=dbcon, whoistruth=whoistruth, clsgroup=clsgroup,
unbiased_is_truth=unbiased_is_truth, evalset=evalset)
if whoistruth == 'Ps':
tpr, fpr, roc_auc = get_roc_and_auroc_for_who(
dbcon=dbcon, evalset=evalset, who='NPs', whoistruth=whoistruth,
unbiased_is_truth=unbiased_is_truth, clsgroup=clsgroup)
# to save raw values for calculating p-values later
overalldf = []
# organize canvas and plot
nperrow = 4 if len(classes) <= 4 else 3
nrows = int(np.ceil((len(classes)) / nperrow))
fig, ax = plt.subplots(nrows, nperrow, figsize=(5 * nperrow, 5.5 * nrows))
scprops = {'alpha': 0.75, 's': 9 ** 2, 'edgecolor': 'k'}
axno = -1
for axis in ax.ravel():
axno += 1
if axno == len(classes):
break
cls = classes[axno]
isdetection = cls == 'detection'
for who in ['NPs', 'JPs', 'SPs']:
pstyle = ir.PARTICIPANT_STYLES[who]
scprops.update({k: pstyle[k] for k in ['c', 'marker']})
keep = accuracy.loc[:, 'participant'].apply(
lambda x: x in ir.who[who])
dfslice = accuracy.loc[keep, :]
dfslice = dfslice.loc[dfslice.loc[:, 'class'] == cls, :]
overalldf.append(dfslice)
# add PR / ROC curve for inferred truth (from NPs)
# versus the "actual" inferred truth (from SPs)
if (whoistruth == 'Ps') and (who == 'NPs'):
lprops = {'color': scprops['c'], 'alpha': 0.7, 'linewidth': 2}
if isdetection:
# get precision-recalll curve
prc = get_precision_recall_for_who(
dbcon=dbcon, evalset=evalset, who='NPs',
whoistruth=whoistruth,
unbiased_is_truth=unbiased_is_truth)
# plot
axis.plot(
prc['recall'], prc['precision'], linestyle='-',
label=f'{who} "Truth" (AP=%0.2f)' % prc['AP'],
**lprops
)
axis.axhline(
prc['random'], xmin=0., xmax=1., c='gray',
linestyle='--', label='Random guess')
elif cls == 'classification':
axis.plot(
fpr['micro'], tpr['micro'], linestyle='-', # noqa
label=f'{who} "Truth" - MicroAvg (AUC=%0.2f)'
% roc_auc['micro'], # noqa
**lprops
)
axis.plot(
fpr['macro'], tpr['macro'], linestyle='--',
label=f'{who} "Truth" - MacroAvg (AUC=%0.2f)'
% roc_auc['macro'],
**lprops
)
else:
axis.plot(
fpr[cls], tpr[cls], linestyle='-', # noqa
label=f'{who} "Truth" (AUC=%0.2f)' % roc_auc[cls], # noqa
**lprops
)
# scatter the various participants
if isdetection:
axis.scatter(
dfslice.loc[:, 'recall'], dfslice.loc[:, 'precision'],
label=f'{who}', **scprops)
else:
axis.scatter(
1 - dfslice.loc[:, 'specificity'],
dfslice.loc[:, 'sensitivity'],
label=f'{who}', **scprops)
if isdetection:
xlab, ylab = ('Recall (Sensitivity)', 'Precision (PPV)')
else:
axis.plot(
[0., 0.5, 1.0], [0., 0.5, 1.0], c='gray',
linestyle='--', label='Random guess')
xlab, ylab = ('1 - Specificity (FPR)', 'Sensitivity (TPR)')
axis.set_xlim(-0.02, 1.02)
axis.set_ylim(-0.02, 1.02)
axis.set_aspect('equal')
axis.set_title(cls.capitalize(), fontsize=14, fontweight='bold')
axis.set_xlabel(xlab, fontsize=11)
axis.set_ylabel(ylab, fontsize=11)
axis.legend(fontsize=8)
# save plot
plt.tight_layout(pad=0.3, w_pad=0.5, h_pad=0.3)
savename = f'{truthstr}_{evalset}_accuracy_stats'
plt.savefig(opj(where, 'plots', savename + '.svg'))
plt.close()
# save raw numbers
overalldf = concat(overalldf, axis=0, ignore_index=True)
overalldf.to_csv(opj(where, 'csv', savename + '.csv'))
def plot_participant_accuracy_stats_v2(
dbcon, savedir: str, unbiased_is_truth: bool, whoistruth: str,
clsgroup: str):
""""""
truthstr = f'{"UNBIASED_" if unbiased_is_truth else ""}' \
f'{whoistruth}_AreTruth'
where = opj(savedir, truthstr)
_maybe_mkdir(where)
_maybe_mkdir(opj(where, 'plots'))
classes, accuracy = _get_accuracy_stats(
dbcon=dbcon, whoistruth=whoistruth, clsgroup=clsgroup,
unbiased_is_truth=unbiased_is_truth)
# to save raw values for calculating p-values later
overalldf = []
# reorder evalsets
tmp = []
for evalset in ir.MAIN_EVALSET_NAMES:
tmp.append(accuracy.loc[accuracy.loc[:, 'evalset'] == evalset, :])
accuracy = concat(tmp, axis=0)
# organize canvas and plot
nperrow = 4 if len(classes) <= 4 else 3
nrows = int(np.ceil((len(classes)) / nperrow))
fig, ax = plt.subplots(nrows, nperrow, figsize=(5 * nperrow, 5.5 * nrows))
scprops = {'alpha': 0.7, 's': 7 ** 2, 'edgecolor': 'k'}
axno = -1
for axis in ax.ravel():
axno += 1
if axno == len(classes):
break
cls = classes[axno]
metric = 'F1' if cls == 'detection' else 'MCC'
dfslice = accuracy.loc[accuracy.loc[:, 'class'] == cls, :].copy()
dfslice.index = dfslice.loc[:, 'participant']
dfslice.loc[:, 'who'] = 'NPs'
for who in ['JPs', 'SPs']:
for p in dfslice.index:
if p in ir.who[who]:
dfslice.loc[p, 'who'] = who
dfslice.loc[:, 'swho'] = dfslice.loc[:, 'who'].copy()
dfslice.loc[dfslice.loc[:, 'swho'] == 'SPs', 'swho'] = 'Ps'
dfslice.loc[dfslice.loc[:, 'swho'] == 'JPs', 'swho'] = 'Ps'
dfslice = dfslice.loc[:, ['class', 'evalset', metric, 'who', 'swho']]
overalldf.append(dfslice)
# main boxplots
bppr = {'alpha': 0.5}
sns.boxplot(
ax=axis, data=dfslice, x='evalset', y=metric, hue='swho',
palette=[ir.PARTICIPANT_STYLES[who]['c'] for who in ['Ps', 'NPs']],
boxprops=bppr, whiskerprops=bppr, capprops=bppr, medianprops=bppr,
showfliers=False,
# notch=True, bootstrap=5000,
)
# scatter each participant group
for who in ['NPs', 'JPs', 'SPs']:
pstyle = ir.PARTICIPANT_STYLES[who]
scprops.update({k: pstyle[k] for k in ['c', 'marker']})
plotme = dfslice.loc[dfslice.loc[:, 'who'] == who, :].copy()
offset = -0.2 if who in ['JPs', 'SPs'] else 0.2
plotme.loc[:, 'x'] = plotme.loc[:, 'evalset'].apply(
lambda x: ir.MAIN_EVALSET_NAMES.index(x) + offset)
plotme = np.array(plotme.loc[:, ['x', metric]])
# add jitter
plotme[:, 0] += 0.05 * np.random.randn(plotme.shape[0])
# now scatter
axis.scatter(
plotme[:, 0], plotme[:, 1], label=f'{who}', **scprops)
axis.set_ylim(0., 1.)
# axis.set_ylim(0.5, 1.)
axis.set_title(cls.capitalize(), fontsize=14, fontweight='bold')
axis.set_ylabel(metric.capitalize(), fontsize=11)
axis.legend()
plt.tight_layout(pad=0.3, w_pad=0.5, h_pad=0.3)
savename = f'{truthstr}_evalset_accuracy_comparison'
plt.savefig(opj(where, 'plots', savename + '.svg'))
plt.close()
# save raw numbers
    overalldf = concat(overalldf, axis=0, ignore_index=True)
# Some utilites functions for loading the data, adding features
import numpy as np
import pandas as pd
from functools import reduce
from sklearn.preprocessing import MinMaxScaler
def load_csv(path):
"""Load dataframe from a csv file
Args:
path (STR): File path
"""
# Load the file
df = pd.read_csv(path)
# Lowercase column names
df.rename(columns=lambda x: x.lower().strip(), inplace=True)
return df
def fill_missing_values(df):
"""Fill the missing data points
Args:
df: Input dataframe
Return: the modified dataframe
"""
# Get datetime col
df['ds'] = pd.to_datetime(df['update_time']) + df['hour_id'].astype('timedelta64[h]')
pdlist = []
for z in df.zone_code.unique():
zone = df[df['zone_code'] == z]
r = pd.date_range(zone.ds.min(), zone.ds.max(), freq='H')
ds_range = pd.DataFrame({'ds': r, 'zone_code': z})
zone_merged = ds_range.merge(zone, how='left', on=['ds', 'zone_code'])
zone_merged['hour_id'] = zone_merged['ds'].dt.hour
# Fill the null values
for col in ['bandwidth_total', 'max_user']:
for index, row in zone_merged[zone_merged[col].isnull()].iterrows():
shifted_index = index - (24*7)
flag = True
while flag:
fill_val = zone_merged.loc[shifted_index, col]
if pd.isnull(fill_val):
shifted_index -= (24*7)
continue
zone_merged.loc[index, col] = fill_val
flag = False
pdlist.append(zone_merged)
out = pd.concat(pdlist)
out.drop(['update_time'], axis=1, inplace=True)
assert not out.isnull().values.any(), 'Error in asserting. There are still nans.'
return out
def add_time_features(df, test=False):
"""Add time features for the data
Args:
df (DataFrame): Input dataframe
Return: the modified df
"""
if test:
df['ds'] = pd.to_datetime(df['update_time']) + df['hour_id'].astype('timedelta64[h]')
else:
df['update_time'] = df['ds'].dt.date
df['dow'] = df['ds'].dt.dayofweek
df['month'] = df['ds'].dt.month
df['doy'] = df['ds'].dt.dayofyear
df['year'] = df['ds'].dt.year
df['day'] = df['ds'].dt.day
df['week'] = df['ds'].dt.week
# df['weekend'] = df['dow'] // 5 == 1
# Normalise day of week col
week_period = 7 / (2 * np.pi)
df['dow_norm'] = df.dow.values / week_period
return df
def add_time_periods(df):
"""Add time periods of a day
Args:
df (DataFrame): Input dataframe
Return: the modified df
"""
    df['hour_id'] = pd.to_numeric(df['hour_id'])